HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part7. by zhtttylz · Pull Request #7794 · apache/hadoop · GitHub
[go: up one dir, main page]
More Web Proxy on the site http://driver.im/
Skip to content

HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part7. #7794

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jul 12, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
8000
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.util.List;
Expand All @@ -35,9 +35,9 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;

public class TestPipelines {
Expand All @@ -57,13 +57,13 @@ public class TestPipelines {
setConfiguration();
}

@Before
// Spins up a fresh MiniDFSCluster with REPL_FACTOR datanodes before each
// test and caches its FileSystem handle for the test body to use.
// NOTE(review): relies on the matching @AfterEach shutDownCluster() to
// close fs and tear the cluster down — confirm both remain paired.
@BeforeEach
public void startUpCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
fs = cluster.getFileSystem();
}

@After
@AfterEach
public void shutDownCluster() throws IOException {
if (fs != null) {
fs.close();
Expand Down Expand Up @@ -108,10 +108,9 @@ public void pipeline_01() throws IOException {
Replica r =
cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());

assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
assertEquals("Should be RBW replica on " + dn
+ " after sequence of calls append()/write()/hflush()",
HdfsServerConstants.ReplicaState.RBW, r.getState());
assertTrue(r != null, "Replica on DN " + dn + " shouldn't be null");
assertEquals(HdfsServerConstants.ReplicaState.RBW, r.getState(),
"Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()");
}
ofs.close();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,9 @@
package org.apache.hadoop.hdfs;

import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;

import java.io.EOFException;
import java.io.IOException;
Expand Down Expand Up @@ -53,10 +53,11 @@
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
Expand All @@ -82,11 +83,13 @@ public class TestPread {
LoggerFactory.getLogger(TestPread.class.getName());
private final GenericTestUtils.LogCapturer dfsClientLog =
GenericTestUtils.LogCapturer.captureLogs(DFSClient.LOG);
@BeforeClass

// Runs once for the whole class: raises the DFSClient logger to WARN so the
// LogCapturer-based assertions in these tests are not drowned in INFO noise.
@BeforeAll
public static void setLogLevel() {
GenericTestUtils.setLogLevel(DFSClient.LOG, org.apache.log4j.Level.WARN);
}
@Before

@BeforeEach
public void setup() {
simulatedStorage = false;
isHedgedRead = false;
Expand All @@ -107,10 +110,10 @@ private void writeFile(FileSystem fileSys, Path name) throws IOException {
// should throw an exception
res = e;
}
assertTrue("Error reading beyond file boundary.", res != null);
assertTrue(res != null, "Error reading beyond file boundary.");
in.close();
if (!fileSys.delete(name, true))
assertTrue("Cannot delete file", false);
assertTrue(false, "Cannot delete file");

// now create the real file
DFSTestUtil.createFile(fileSys, name, fileSize, fileSize,
Expand All @@ -119,9 +122,9 @@ private void writeFile(FileSystem fileSys, Path name) throws IOException {

private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
expected[from+idx]+" actual "+actual[idx],
actual[idx], expected[from+idx]);
assertEquals(actual[idx], expected[from + idx],
message + " byte " + (from + idx)
+ " differs. expected " + expected[from + idx] + " actual " + actual[idx]);
actual[idx] = 0;
}
}
Expand All @@ -140,17 +143,17 @@ private void doPread(FSDataInputStream stm, long position, byte[] buffer,
while (nread < length) {
int nbytes =
stm.read(position + nread, buffer, offset + nread, length - nread);
assertTrue("Error in pread", nbytes > 0);
assertTrue(nbytes > 0, "Error in pread");
nread += nbytes;
}

if (dfstm != null) {
if (isHedgedRead) {
assertTrue("Expected read statistic to be incremented", length <= dfstm
.getReadStatistics().getTotalBytesRead() - totalRead);
assertTrue(length <= dfstm.getReadStatistics().getTotalBytesRead() - totalRead,
"Expected read statistic to be incremented");
} else {
assertEquals("Expected read statistic to be incremented", length, dfstm
.getReadStatistics().getTotalBytesRead() - totalRead);
assertEquals(length, dfstm.getReadStatistics().getTotalBytesRead() - totalRead,
"Expected read statistic to be incremented");
}
}
}
Expand Down Expand Up @@ -221,7 +224,7 @@ private void pReadFile(FileSystem fileSys, Path name) throws IOException {
// should throw an exception
res = e;
}
assertTrue("Error reading beyond file boundary.", res != null);
assertTrue(res != null, "Error reading beyond file boundary.");

stm.close();
}
Expand Down Expand Up @@ -553,9 +556,9 @@ public Void call() throws IOException {
});
try {
future.get(4, TimeUnit.SECONDS);
Assert.fail();
Assertions.fail();
} catch (ExecutionException ee) {
assertTrue(ee.toString(), ee.getCause() instanceof EOFException);
assertTrue(ee.getCause() instanceof EOFException, ee.toString());
} finally {
future.cancel(true);
executor.shutdown();
Expand All @@ -570,7 +573,8 @@ public Void call() throws IOException {
* retrying on a different datanode or by refreshing data nodes and retrying each data node one
* more time.
*/
@Test(timeout=120000)
@Test
@Timeout(value = 120)
public void testGetFromOneDataNodeExceptionLogging() throws IOException {
// With maxBlockAcquireFailures = 0, we would try on each datanode only once and if
// we fail on all three datanodes, we fail the read request.
Expand Down Expand Up @@ -647,7 +651,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
/**
* Test the case where we always hit IOExceptions, causing the read request to fail.
*/
@Test(timeout=60000)
@Test
@Timeout(value = 60)
public void testFetchFromDataNodeExceptionLoggingFailedRequest()
throws IOException {
testFetchFromDataNodeExceptionLoggingFailedRequest(0);
Expand Down Expand Up @@ -723,7 +728,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
}
}

@Test(timeout=30000)
@Test
@Timeout(value = 30)
public void testHedgedReadFromAllDNFailed() throws IOException {
Configuration conf = new Configuration();
int numHedgedReadPoolThreads = 5;
Expand Down Expand Up @@ -768,7 +774,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
byte[] buffer = new byte[64 * 1024];
input = dfsClient.open(filename);
input.read(0, buffer, 0, 1024);
Assert.fail("Reading the block should have thrown BlockMissingException");
Assertions.fail("Reading the block should have thrown BlockMissingException");
} catch (BlockMissingException e) {
// The result of 9 is due to 2 blocks by 4 iterations plus one because
// hedgedReadOpsLoopNumForTesting is incremented at start of the loop.
Expand Down Expand Up @@ -808,7 +814,8 @@ public void testPreadFailureWithChangedBlockLocations() throws Exception {
* 7. Consider next calls to getBlockLocations() always returns DN3 as last
* location.<br>
*/
@Test(timeout = 60000)
@Test
@Timeout(value = 60)
public void testPreadHedgedFailureWithChangedBlockLocations()
throws Exception {
isHedgedRead = true;
Expand Down Expand Up @@ -929,10 +936,10 @@ public Boolean get() {
byte[] buf = new byte[1024];
int n = din.read(0, buf, 0, data.length());
assertEquals(data.length(), n);
assertEquals("Data should be read", data, new String(buf, 0, n));
assertTrue("Read should complete with maximum " + maxFailures
+ " failures, but completed with " + din.failures,
din.failures <= maxFailures);
assertEquals(data, new String(buf, 0, n), "Data should be read");
assertTrue(din.failures <= maxFailures,
"Read should complete with maximum " + maxFailures
+ " failures, but completed with " + din.failures);
DFSClient.LOG.info("Read completed");
}
}
Expand Down
Loading
0