8000 HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part3. by zhtttylz · Pull Request #7626 · apache/hadoop · GitHub
[go: up one dir, main page]
More Web Proxy on the site http://driver.im/
Skip to content

HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part3. #7626

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Apr 22, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -241,6 +241,26 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
</dependency>
<dependency>
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Has this PR upgraded all of the JUnit 4 cases? If yes, we should remove the JUnit 4 dependency.

<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-engine</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-params</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.platform</groupId>
<artifactId>junit-platform-launcher</artifactId>
<scope>test</scope>
</dependency>
</dependencies>

<build>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,23 +34,22 @@
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.security.TestPermission;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.jupiter.api.AfterEach;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.junit.Assert.*;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;


/**
* This tests enabling NN sending the established QOP back to client,
* in encrypted message, using block access token key.
*/
@RunWith(Parameterized.class)
public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
public static final Logger LOG = LoggerFactory.getLogger(TestPermission.class);

Expand All @@ -61,7 +60,6 @@ public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
private String configKey;
private String qopValue;

@Parameterized.Parameters
public static Collection<Object[]> qopSettings() {
// if configured with privacy, the negotiated QOP should auth-conf
// similarly for the other two
Expand All @@ -72,12 +70,13 @@ public static Collection<Object[]> qopSettings() {
});
}

public TestBlockTokenWrappingQOP(String configKey, String qopValue) {
this.configKey = configKey;
this.qopValue = qopValue;
public void initTestBlockTokenWrappingQOP(String pconfigKey, String pqopValue)
throws Exception {
this.configKey = pconfigKey;
this.qopValue = pqopValue;
setup();
}

@Before
public void setup() throws Exception {
conf = createSecureConfig(this.configKey);
conf.set(DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY, "12000");
Expand Down Expand Up @@ -109,15 +108,17 @@ public void setup() throws Exception {
dfs = (DistributedFileSystem) FileSystem.get(uriAuxiliary, conf);
}

@After
@AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}

@Test
public void testAddBlockWrappingQOP() throws Exception {
@MethodSource("qopSettings")
@ParameterizedTest
public void testAddBlockWrappingQOP(String pconfigKey, String pqopValue) throws Exception {
initTestBlockTokenWrappingQOP(pconfigKey, pqopValue);
final String src = "/testAddBlockWrappingQOP";
final Path path = new Path(src);

Expand All @@ -132,8 +133,10 @@ public void testAddBlockWrappingQOP() throws Exception {
assertEquals(this.qopValue, new String(secret));
}

@Test
public void testAppendWrappingQOP() throws Exception {
@MethodSource("qopSettings")
@ParameterizedTest
public void testAppendWrappingQOP(String pconfigKey, String pqopValue) throws Exception {
initTestBlockTokenWrappingQOP(pconfigKey, pqopValue);
final String src = "/testAppendWrappingQOP";
final Path path = new Path(src);

Expand All @@ -155,8 +158,11 @@ public void testAppendWrappingQOP() throws Exception {
assertEquals(this.qopValue, new String(secret));
}

@Test
public void testGetBlockLocationWrappingQOP() throws Exception {
@MethodSource("qopSettings")
@ParameterizedTest
public void testGetBlockLocationWrappingQOP(String pconfigKey, String pqopValue)
throws Exception {
initTestBlockTokenWrappingQOP(pconfigKey, pqopValue);
final String src = "/testGetBlockLocationWrappingQOP";
final Path path = new Path(src);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,16 +32,19 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.token.Token;
import org.junit.Before;
import org.junit.Test;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_DERIVED_QOP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SEND_QOP_ENABLED;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_NEW_QOP_KEY;
import static org.junit.Assert.*;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;


/**
Expand All @@ -58,7 +61,7 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {

private static HdfsConfiguration clusterConf;

@Before
@BeforeEach
public void setup() throws Exception {
clusterConf = createSecureConfig(
"authentication,integrity,privacy");
Expand Down Expand Up @@ -259,7 +262,7 @@ public void testMultipleNNPortOverwriteDownStream() throws Exception {
// datanodes become equal to auth.
// Note that it is not necessarily the case for all datanodes,
// since a datanode may be always at the last position in pipelines.
assertTrue("At least two qops should be auth", count >= 2);
assertTrue(count >= 2, "At least two qops should be auth");

clientConf.set(HADOOP_RPC_PROTECTION, "integrity");
FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf);
Expand All @@ -268,7 +271,7 @@ public void testMultipleNNPortOverwriteDownStream() throws Exception {
.map(dn -> dn.getSaslClient().getTargetQOP())
.filter("auth"::equals)
.count();
assertTrue("At least two qops should be auth", count >= 2);
assertTrue(count >= 2, "At least two qops should be auth");

clientConf.set(HADOOP_RPC_PROTECTION, "authentication");
FileSystem fsAuth = FileSystem.get(uriAuthPort, clientConf);
Expand All @@ -277,7 +280,7 @@ public void testMultipleNNPortOverwriteDownStream() throws Exception {
.map(dn -> dn.getSaslServer().getNegotiatedQOP())
.filter("auth"::equals)
.count();
assertEquals("All qops should be auth", 3, count);
assertEquals(3, count, "All qops should be auth");
} finally {
if (cluster != null) {
cluster.shutdown();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@

package org.apache.hadoop.hdfs.client.impl;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.DataOutputStream;
import java.io.IOException;
Expand Down
Loading
0