HDFS-17172. Support FSNamesystemLock Parameters reconfigurable by haiyang1987 · Pull Request #6002 · apache/hadoop

HDFS-17172. Support FSNamesystemLock Parameters reconfigurable #6002

Merged · 1 commit · Nov 17, 2023
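
This change adds the FSNamesystemLock settings for detailed lock metrics and the read/write lock hold-time reporting thresholds to the NameNode's reconfigurable properties, so they can be adjusted on a running NameNode instead of requiring a restart. In a deployment the refresh goes through the usual "hdfs dfsadmin -reconfig namenode <host:ipc_port> start" flow after updating hdfs-site.xml; the sketch below exercises the same surface directly through the NameNode API, mirroring the test added in this PR (the "cluster" variable and the chosen threshold value are placeholders, not part of the change):

    // Sketch only: raise the read-lock reporting threshold on a live NameNode.
    // Assumes a running MiniDFSCluster referenced by "cluster" (a placeholder name,
    // as used in the reconfiguration test below); may throw ReconfigurationException.
    final NameNode nameNode = cluster.getNameNode();
    final FSNamesystem fsNamesystem = nameNode.getNamesystem();
    nameNode.reconfigureProperty(
        DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY, "20000");
    // The setter forwards to the now-volatile field in FSNamesystemLock, so the new
    // threshold takes effect immediately, without restarting the NameNode.
    assertEquals(20000L, fsNamesystem.getReadLockReportingThresholdMs());
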
@@ -9124,4 +9124,31 @@ Path getEnclosingRoot(final String srcArg) throws IOException {
return new Path("/");
}
}

public void setMetricsEnabled(boolean metricsEnabled) {
this.fsLock.setMetricsEnabled(metricsEnabled);
}

@VisibleForTesting
public boolean isMetricsEnabled() {
return this.fsLock.isMetricsEnabled();
}

public void setReadLockReportingThresholdMs(long readLockReportingThresholdMs) {
this.fsLock.setReadLockReportingThresholdMs(readLockReportingThresholdMs);
}

@VisibleForTesting
public long getReadLockReportingThresholdMs() {
return this.fsLock.getReadLockReportingThresholdMs();
}

public void setWriteLockReportingThresholdMs(long writeLockReportingThresholdMs) {
this.fsLock.setWriteLockReportingThresholdMs(writeLockReportingThresholdMs);
}

@VisibleForTesting
public long getWriteLockReportingThresholdMs() {
return this.fsLock.getWriteLockReportingThresholdMs();
}
}
@@ -68,7 +68,7 @@ class FSNamesystemLock {
@VisibleForTesting
protected ReentrantReadWriteLock coarseLock;

private final boolean metricsEnabled;
private volatile boolean metricsEnabled;
private final MutableRatesWithAggregation detailedHoldTimeMetrics;
private final Timer timer;

@@ -79,14 +79,14 @@ class FSNamesystemLock {
private final long lockSuppressWarningIntervalMs;

/** Threshold (ms) for long holding write lock report. */
private final long writeLockReportingThresholdMs;
private volatile long writeLockReportingThresholdMs;
/** Last time stamp for write lock. Keep the longest one for multi-entrance.*/
private long writeLockHeldTimeStampNanos;
/** Frequency limiter used for reporting long write lock hold times. */
private final LogThrottlingHelper writeLockReportLogger;

/** Threshold (ms) for long holding read lock report. */
private final long readLockReportingThresholdMs;
private volatile long readLockReportingThresholdMs;
/**
* Last time stamp for read lock. Keep the longest one for
* multi-entrance. This is ThreadLocal since there could be
@@ -462,6 +462,33 @@ private static String getMetricName(String operationName, boolean isWrite) {
LOCK_METRIC_SUFFIX;
}

@VisibleForTesting
public void setMetricsEnabled(boolean metricsEnabled) {
this.metricsEnabled = metricsEnabled;
}

public boolean isMetricsEnabled() {
return metricsEnabled;
}

public void setReadLockReportingThresholdMs(long readLockReportingThresholdMs) {
this.readLockReportingThresholdMs = readLockReportingThresholdMs;
}

@VisibleForTesting
public long getReadLockReportingThresholdMs() {
return readLockReportingThresholdMs;
}

public void setWriteLockReportingThresholdMs(long writeLockReportingThresholdMs) {
this.writeLockReportingThresholdMs = writeLockReportingThresholdMs;
}

@VisibleForTesting
public long getWriteLockReportingThresholdMs() {
return writeLockReportingThresholdMs;
}

/**
* Read lock Held Info.
*/
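
A note on the FSNamesystemLock change above: metricsEnabled and the two reporting thresholds were final fields and are now volatile. There is a single writer (the reconfiguration handler on the admin thread) and many readers (the lock acquire/release paths), with no compound read-modify-write, so volatile visibility is sufficient and no extra locking is added on the hot path. A minimal illustrative sketch of the pattern (class and field names here are hypothetical, not taken from the PR):

    // Illustrative sketch, not HDFS code: one writer, many readers, plain volatile.
    class ReportingThresholdHolder {
      // Hypothetical default; the real defaults come from DFSConfigKeys.
      private volatile long reportingThresholdMs = 5000L;

      void reconfigure(long newThresholdMs) {
        // Admin thread: a single plain write, immediately visible to readers.
        this.reportingThresholdMs = newThresholdMs;
      }

      boolean shouldReport(long lockHeldTimeMs) {
        // Lock release path: reads the latest value without taking any lock.
        return lockHeldTimeMs >= reportingThresholdMs;
      }
    }
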
@@ -139,6 +139,12 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_MIN_BLOCKS_FOR_WRITE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_MIN_BLOCKS_FOR_WRITE_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
@@ -371,7 +377,10 @@ public enum OperationCategory {
DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK,
DFS_NAMENODE_BLOCKPLACEMENTPOLICY_MIN_BLOCKS_FOR_WRITE_KEY,
IPC_SERVER_LOG_SLOW_RPC,
IPC_SERVER_LOG_SLOW_RPC_THRESHOLD_MS_KEY));
IPC_SERVER_LOG_SLOW_RPC_THRESHOLD_MS_KEY,
DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY,
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY));

private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2378,6 +2387,10 @@ protected String reconfigurePropertyImpl(String property, String newVal)
} else if (property.equals(IPC_SERVER_LOG_SLOW_RPC) ||
(property.equals(IPC_SERVER_LOG_SLOW_RPC_THRESHOLD_MS_KEY))) {
return reconfigureLogSlowRPC(property, newVal);
} else if (property.equals(DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY)
|| property.equals(DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY)
|| property.equals(DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY)) {
return reconfigureFSNamesystemLockMetricsParameters(property, newVal);
} else {
throw new ReconfigurationException(property, newVal, getConf().get(
property));
@@ -2736,6 +2749,48 @@ private String reconfigureMinBlocksForWrite(String property, String newValue)
}
}

private String reconfigureFSNamesystemLockMetricsParameters(final String property,
final String newVal) throws ReconfigurationException {
String result;
try {
switch (property) {
case DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY: {
if (newVal != null && !newVal.equalsIgnoreCase("true") &&
!newVal.equalsIgnoreCase("false")) {
          throw new IllegalArgumentException(newVal + " is not a boolean value");
}
boolean enable = (newVal == null ?
DFS_NAMENODE_LOCK_DETAILED_METRICS_DEFAULT : Boolean.parseBoolean(newVal));
result = Boolean.toString(enable);
namesystem.setMetricsEnabled(enable);
break;
}
case DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY: {
long readLockReportingThresholdMs = (newVal == null ?
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT : Long.parseLong(newVal));
result = Long.toString(readLockReportingThresholdMs);
namesystem.setReadLockReportingThresholdMs(readLockReportingThresholdMs);
break;
}
case DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY: {
long writeLockReportingThresholdMs = (newVal == null ?
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT : Long.parseLong(newVal));
result = Long.toString(writeLockReportingThresholdMs);
namesystem.setWriteLockReportingThresholdMs(writeLockReportingThresholdMs);
break;
}
default: {
throw new IllegalArgumentException("Unexpected property " + property + " in " +
"reconfigureFSNamesystemLockMetricsParameters");
}
}
LOG.info("RECONFIGURE* changed FSNamesystemLockMetricsParameters {} to {}", property, result);
return result;
    } catch (IllegalArgumentException e) {
throw new ReconfigurationException(property, newVal, getConf().get(property), e);
}
}

@Override // ReconfigurableBase
protected Configuration getNewConf() {
return new HdfsConfiguration();
@@ -34,6 +34,9 @@
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_SERVER_LOG_SLOW_RPC_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_NODES_TO_REPORT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.junit.Assert.*;

import org.slf4j.Logger;
@@ -747,6 +750,78 @@ public void testReconfigureLogSlowRPC() throws ReconfigurationException {
assertEquals(nnrs.getClientRpcServer().getLogSlowRPCThresholdTime(), 20000);
}

@Test
public void testReconfigureFSNamesystemLockMetricsParameters()
throws ReconfigurationException, IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY, false);
long defaultReadLockMS = 1000L;
conf.setLong(DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY, defaultReadLockMS);
long defaultWriteLockMS = 1000L;
conf.setLong(DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY, defaultWriteLockMS);

try (MiniDFSCluster newCluster = new MiniDFSCluster.Builder(conf).build()) {
newCluster.waitActive();
final NameNode nameNode = newCluster.getNameNode();
final FSNamesystem fsNamesystem = nameNode.getNamesystem();
// verify default value.
assertFalse(fsNamesystem.isMetricsEnabled());
assertEquals(defaultReadLockMS, fsNamesystem.getReadLockReportingThresholdMs());
assertEquals(defaultWriteLockMS, fsNamesystem.getWriteLockReportingThresholdMs());

// try invalid metricsEnabled.
try {
nameNode.reconfigurePropertyImpl(DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY,
"non-boolean");
fail("should not reach here");
} catch (ReconfigurationException e) {
assertEquals(
"Could not change property dfs.namenode.lock.detailed-metrics.enabled from " +
"'false' to 'non-boolean'", e.getMessage());
}

// try correct metricsEnabled.
nameNode.reconfigurePropertyImpl(DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY, "true");
assertTrue(fsNamesystem.isMetricsEnabled());

nameNode.reconfigurePropertyImpl(DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY, null);
assertFalse(fsNamesystem.isMetricsEnabled());

// try invalid readLockMS.
try {
nameNode.reconfigureProperty(DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
"non-numeric");
fail("Should not reach here");
} catch (ReconfigurationException e) {
assertEquals("Could not change property " +
"dfs.namenode.read-lock-reporting-threshold-ms from '" +
defaultReadLockMS + "' to 'non-numeric'", e.getMessage());
}

// try correct readLockMS.
nameNode.reconfigureProperty(DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
"20000");
assertEquals(fsNamesystem.getReadLockReportingThresholdMs(), 20000);


// try invalid writeLockMS.
try {
nameNode.reconfigureProperty(
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY, "non-numeric");
fail("Should not reach here");
} catch (ReconfigurationException e) {
assertEquals("Could not change property " +
"dfs.namenode.write-lock-reporting-threshold-ms from '" +
defaultWriteLockMS + "' to 'non-numeric'", e.getMessage());
}

// try correct writeLockMS.
nameNode.reconfigureProperty(
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY, "100000");
assertEquals(fsNamesystem.getWriteLockReportingThresholdMs(), 100000);
}
}

@After
public void shutDown() throws IOException {
if (cluster != null) {
@@ -99,7 +99,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY;
import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION;
import static org.hamcrest.CoreMatchers.allOf;
import static org.hamcrest.CoreMatchers.anyOf;
@@ -442,7 +442,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
final List<String> outs = Lists.newArrayList();
final List<String> errs = Lists.newArrayList();
getReconfigurableProperties("namenode", address, outs, errs);
assertEquals(25, outs.size());
assertEquals(28, outs.size());
assertTrue(outs.get(0).contains("Reconfigurable properties:"));
assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -457,7 +457,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, outs.get(11));
assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, outs.get(12));
assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(13));
assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(14));
assertEquals(DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY, outs.get(14));
assertEquals(errs.size(), 0);
}
