diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index df9e3907bdaf2..8340c610d8980 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -2110,4 +2110,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final long DFS_LEASE_HARDLIMIT_DEFAULT =
       HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT;
+  public static final String DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_KEY =
+      "dfs.namenode.datanode.list.cache.expiration.ms";
+  public static final long DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_DEFAULT = 0;
 }
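The two constants above only declare the new setting; the cache itself is built in DatanodeManager further down. As a quick orientation, a minimal sketch (not part of the patch; the class name and the 30-second value are arbitrary illustrations) of setting the key programmatically:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class DatanodeListCacheConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // 0 (the default) leaves the cache disabled; any positive value enables it.
        conf.setLong(
            DFSConfigKeys.DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_KEY, 30000L);
        System.out.println(conf.getLong(
            DFSConfigKeys.DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_KEY, 0L));
      }
    }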
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1d680702cdcb7..b32ab6da2c088 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -862,9 +862,9 @@ public void refreshBlockPlacementPolicy(Configuration conf) {
   /** Dump meta data to out. */
   public void metaSave(PrintWriter out) {
     assert namesystem.hasReadLock(RwLockMode.BM);
-    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-    datanodeManager.fetchDatanodes(live, dead, false);
+    final List<DatanodeDescriptor> live = new ArrayList<>();
+    final List<DatanodeDescriptor> dead = new ArrayList<>();
+    datanodeManager.fetchDatanodes(live, dead, false, false);
     out.println("Live Datanodes: " + live.size());
     out.println("Dead Datanodes: " + dead.size());
@@ -1722,6 +1722,9 @@ public void verifyReplication(String src,
   public boolean isSufficientlyReplicated(BlockInfo b) {
     // Compare against the lesser of the minReplication and number of live DNs.
     final int liveReplicas = countNodes(b).liveReplicas();
+    if (liveReplicas == 0) {
+      return false;
+    }
     if (hasMinStorage(b, liveReplicas)) {
       return true;
     }
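A detail worth noticing in the DatanodeManager changes that follow: every use of the new volatile cache field first copies it into a local. The self-contained sketch below isolates why (generic types stand in for the HDFS ones, and plain com.google.common coordinates replace Hadoop's shaded org.apache.hadoop.thirdparty package so it compiles on its own): a concurrent refreshDatanodeListSnapshot(0) can null the field between the null check and the invalidateAll() call, and the single volatile read makes that check-then-act sequence safe.

    import java.util.List;
    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    public class VolatileCacheReadExample {
      // Stand-in for DatanodeManager.datanodeListSnapshots.
      private volatile Cache<String, List<String>> snapshots =
          CacheBuilder.newBuilder().build();

      /** Mirrors the invalidation done in addDatanode()/wipeDatanode(). */
      void invalidateSafely() {
        Cache<String, List<String>> local = snapshots; // one volatile read
        if (local != null) {
          local.invalidateAll(); // safe even if 'snapshots' is nulled concurrently
        }
      }

      /** Mirrors refreshDatanodeListSnapshot(0), which disables the cache. */
      void disable() {
        snapshots = null;
      }
    }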
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 01f1af9624d05..195af6bd6e9bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -24,6 +24,8 @@
 
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.util.Preconditions;
+import org.apache.hadoop.thirdparty.com.google.common.cache.Cache;
+import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
 
@@ -70,6 +72,7 @@
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.*;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
@@ -230,6 +233,9 @@ public class DatanodeManager {
 
   private final boolean randomNodeOrderEnabled;
 
+  /** Cached map of DatanodeReportType -> list of DatanodeDescriptor for metrics purposes. */
+  private volatile Cache<DatanodeReportType, List<DatanodeDescriptor>> datanodeListSnapshots = null;
+
   DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
       final Configuration conf) throws IOException {
     this.namesystem = namesystem;
@@ -364,6 +370,18 @@ public class DatanodeManager {
     this.randomNodeOrderEnabled = conf.getBoolean(
         DFSConfigKeys.DFS_NAMENODE_RANDOM_NODE_ORDER_ENABLED,
         DFSConfigKeys.DFS_NAMENODE_RANDOM_NODE_ORDER_ENABLED_DEFAULT);
+
+    long datanodeListCacheExpirationMs =
+        conf.getLong(DFSConfigKeys.DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_KEY,
+            DFSConfigKeys.DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_DEFAULT);
+    if (datanodeListCacheExpirationMs > 0) {
+      LOG.info("Using cached DN list for metrics, expiration time = {} ms.",
+          datanodeListCacheExpirationMs);
+      datanodeListSnapshots = CacheBuilder.newBuilder()
+          .expireAfterWrite(datanodeListCacheExpirationMs, TimeUnit.MILLISECONDS)
+          .build();
+    }
+
   }
 
   /**
@@ -945,6 +963,11 @@ void addDatanode(final DatanodeDescriptor node) {
     synchronized(this) {
       host2DatanodeMap.remove(datanodeMap.put(node.getDatanodeUuid(), node));
     }
+    Cache<DatanodeReportType, List<DatanodeDescriptor>> tmpDatanodeListSnapshots =
+        datanodeListSnapshots;
+    if (tmpDatanodeListSnapshots != null) {
+      tmpDatanodeListSnapshots.invalidateAll();
+    }
 
     networktopology.add(node); // may throw InvalidTopologyException
     host2DatanodeMap.add(node);
@@ -963,6 +986,11 @@ private void wipeDatanode(final DatanodeID node) {
     synchronized (this) {
       host2DatanodeMap.remove(datanodeMap.remove(key));
     }
+    Cache<DatanodeReportType, List<DatanodeDescriptor>> tmpDatanodeListSnapshots =
+        datanodeListSnapshots;
+    if (tmpDatanodeListSnapshots != null) {
+      tmpDatanodeListSnapshots.invalidateAll();
+    }
     if (LOG.isDebugEnabled()) {
       LOG.debug("{}.wipeDatanode({}): storage {} is removed from datanodeMap.",
           getClass().getSimpleName(), node, key);
@@ -1438,7 +1466,7 @@ public int getNumLiveDataNodes() {
 
   /** @return the number of dead datanodes. */
   public int getNumDeadDataNodes() {
-    return getDatanodeListForReport(DatanodeReportType.DEAD).size();
+    return getDatanodeListSnapshotForReport(DatanodeReportType.DEAD).size();
   }
 
   /** @return the number of datanodes. */
@@ -1453,12 +1481,12 @@ public List<DatanodeDescriptor> getDecommissioningNodes() {
     // There is no need to take namesystem reader lock as
     // getDatanodeListForReport will synchronize on datanodeMap
     // A decommissioning DN may be "alive" or "dead".
-    return getDatanodeListForReport(DatanodeReportType.DECOMMISSIONING);
+    return getDatanodeListSnapshotForReport(DatanodeReportType.DECOMMISSIONING);
   }
 
   /** @return list of datanodes that are entering maintenance. */
   public List<DatanodeDescriptor> getEnteringMaintenanceNodes() {
-    return getDatanodeListForReport(DatanodeReportType.ENTERING_MAINTENANCE);
+    return getDatanodeListSnapshotForReport(DatanodeReportType.ENTERING_MAINTENANCE);
   }
 
   /* Getter and Setter for stale DataNodes related attributes */
@@ -1534,15 +1562,19 @@ void setNumStaleStorages(int numStaleStorages) {
 
   /** Fetch live and dead datanodes. */
   public void fetchDatanodes(final List<DatanodeDescriptor> live,
-      final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {
+      final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode, boolean useCache) {
     if (live == null && dead == null) {
       throw new HadoopIllegalArgumentException("Both live and dead lists are null");
     }
-    // There is no need to take namesystem reader lock as
-    // getDatanodeListForReport will synchronize on datanodeMap
-    final List<DatanodeDescriptor> results =
-        getDatanodeListForReport(DatanodeReportType.ALL);
+    List<DatanodeDescriptor> results;
+    if (useCache) {
+      results = getDatanodeListSnapshotForReport(DatanodeReportType.ALL);
+    } else {
+      // There is no need to take namesystem reader lock as
+      // getDatanodeListForReport will synchronize on datanodeMap
+      results = getDatanodeListForReport(DatanodeReportType.ALL);
+    }
     for(DatanodeDescriptor node : results) {
       if (isDatanodeDead(node)) {
         if (dead != null) {
@@ -1635,6 +1667,37 @@ private DatanodeID parseDNFromHostsEntry(String hostLine) {
     return dnId;
   }
 
+  public void refreshDatanodeListSnapshot(long newExpirationMs) {
+    if (newExpirationMs <= 0) {
+      LOG.info("New config is non-positive ({}), disabling DN list cache", newExpirationMs);
+      datanodeListSnapshots = null;
+    } else {
+      LOG.info("Resetting DN list cache with new expiration time {}ms", newExpirationMs);
+      datanodeListSnapshots = CacheBuilder.newBuilder()
+          .expireAfterWrite(newExpirationMs, TimeUnit.MILLISECONDS)
+          .build();
+    }
+  }
+
+  /**
+   * Low-impact variant of {@link #getDatanodeListForReport} that may return
+   * stale data; intended for low-impact callers such as metrics.
+   */
+  public List<DatanodeDescriptor> getDatanodeListSnapshotForReport(
+      final DatanodeReportType type) {
+    Cache<DatanodeReportType, List<DatanodeDescriptor>> tmpDatanodeListSnapshots =
+        datanodeListSnapshots;
+    if (tmpDatanodeListSnapshots == null) {
+      return getDatanodeListForReport(type);
+    }
+    try {
+      return tmpDatanodeListSnapshots.get(type, () -> getDatanodeListForReport(type));
+    } catch (ExecutionException e) {
+      // Fall back to an uncached fetch if the cache lookup fails.
+      return getDatanodeListForReport(type);
+    }
+  }
+
   /** For generating datanode reports */
   public List<DatanodeDescriptor> getDatanodeListForReport(
       final DatanodeReportType type) {
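The snapshot getter above leans entirely on Guava's Cache: expireAfterWrite bounds how stale a snapshot can get, and get(key, loader) computes the value at most once per expiration window. A standalone sketch of exactly that behavior, using plain com.google.common coordinates rather than Hadoop's shaded org.apache.hadoop.thirdparty package so it compiles outside the Hadoop tree:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;
    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    public class SnapshotCacheExample {
      public static void main(String[] args) throws Exception {
        AtomicInteger loads = new AtomicInteger();
        Cache<String, Integer> cache = CacheBuilder.newBuilder()
            .expireAfterWrite(100, TimeUnit.MILLISECONDS)
            .build();
        // Within the expiration window, only the first get() runs the loader.
        cache.get("ALL", loads::incrementAndGet);
        cache.get("ALL", loads::incrementAndGet);
        System.out.println(loads.get()); // prints 1
        Thread.sleep(150); // let the entry expire
        cache.get("ALL", loads::incrementAndGet); // loader runs again
        System.out.println(loads.get()); // prints 2
      }
    }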
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c4aa37782531b..cda368c0e6a75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5788,8 +5788,8 @@ public int getNumDeadDataNodes() {
   @Metric({"NumDecomLiveDataNodes",
       "Number of datanodes which have been decommissioned and are now live"})
   public int getNumDecomLiveDataNodes() {
-    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
+    final List<DatanodeDescriptor> live = new ArrayList<>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false, true);
     int liveDecommissioned = 0;
     for (DatanodeDescriptor node : live) {
       liveDecommissioned += node.isDecommissioned() ? 1 : 0;
@@ -5801,8 +5801,8 @@ public int getNumDecomLiveDataNodes() {
   @Metric({"NumDecomDeadDataNodes",
       "Number of datanodes which have been decommissioned and are now dead"})
   public int getNumDecomDeadDataNodes() {
-    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, false);
+    final List<DatanodeDescriptor> dead = new ArrayList<>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, false, true);
     int deadDecommissioned = 0;
     for (DatanodeDescriptor node : dead) {
       deadDecommissioned += node.isDecommissioned() ? 1 : 0;
@@ -5814,8 +5814,8 @@ public int getNumDecomDeadDataNodes() {
   @Metric({"NumInServiceLiveDataNodes",
       "Number of live datanodes which are currently in service"})
   public int getNumInServiceLiveDataNodes() {
-    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
+    final List<DatanodeDescriptor> live = new ArrayList<>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true, true);
     int liveInService = live.size();
     for (DatanodeDescriptor node : live) {
       liveInService -= node.isInMaintenance() ? 1 : 0;
@@ -5827,8 +5827,8 @@ public int getNumInServiceLiveDataNodes() {
   @Metric({"VolumeFailuresTotal",
       "Total number of volume failures across all Datanodes"})
   public int getVolumeFailuresTotal() {
-    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
+    List<DatanodeDescriptor> live = new ArrayList<>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false, true);
     int volumeFailuresTotal = 0;
     for (DatanodeDescriptor node: live) {
       volumeFailuresTotal += node.getVolumeFailures();
@@ -5840,8 +5840,8 @@ public int getVolumeFailuresTotal() {
   @Metric({"EstimatedCapacityLostTotal",
       "An estimate of the total capacity lost due to volume failures"})
   public long getEstimatedCapacityLostTotal() {
-    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
+    List<DatanodeDescriptor> live = new ArrayList<>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false, true);
     long estimatedCapacityLostTotal = 0;
     for (DatanodeDescriptor node: live) {
       VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
@@ -6730,10 +6730,9 @@ public int getThreads() {
    */
   @Override // NameNodeMXBean
   public String getLiveNodes() {
-    final Map<String, Map<String, Object>> info =
-        new HashMap<String, Map<String, Object>>();
-    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    blockManager.getDatanodeManager().fetchDatanodes(live, null, false);
+    final Map<String, Map<String, Object>> info = new HashMap<>();
+    final List<DatanodeDescriptor> live = new ArrayList<>();
+    blockManager.getDatanodeManager().fetchDatanodes(live, null, false, true);
     for (DatanodeDescriptor node : live) {
       ImmutableMap.Builder<String, Object> innerinfo =
           ImmutableMap.<String, Object>builder();
@@ -6785,10 +6784,9 @@ public String getLiveNodes() {
    */
   @Override // NameNodeMXBean
   public String getDeadNodes() {
-    final Map<String, Map<String, Object>> info =
-        new HashMap<String, Map<String, Object>>();
-    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-    blockManager.getDatanodeManager().fetchDatanodes(null, dead, false);
+    final Map<String, Map<String, Object>> info = new HashMap<>();
+    final List<DatanodeDescriptor> dead = new ArrayList<>();
+    blockManager.getDatanodeManager().fetchDatanodes(null, dead, false, true);
     for (DatanodeDescriptor node : dead) {
       Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
           .put("lastContact", getLastContact(node))
@@ -6916,10 +6914,9 @@ public String getNodeUsage() {
     float min = 0;
     float dev = 0;
 
-    final Map<String, Map<String, Object>> info =
-        new HashMap<String, Map<String, Object>>();
-    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
+    final Map<String, Map<String, Object>> info = new HashMap<>();
+    final List<DatanodeDescriptor> live = new ArrayList<>();
+    blockManager.getDatanodeManager().fetchDatanodes(live, null, true, true);
     for (Iterator<DatanodeDescriptor> it = live.iterator(); it.hasNext();) {
       DatanodeDescriptor node = it.next();
       if (!node.isInService()) {
@@ -9094,8 +9091,8 @@ public long getBytesInFuture() {
   @Metric({"NumInMaintenanceLiveDataNodes",
       "Number of live Datanodes which are in maintenance state"})
   public int getNumInMaintenanceLiveDataNodes() {
-    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
+    final List<DatanodeDescriptor> live = new ArrayList<>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true, true);
     int liveInMaintenance = 0;
     for (DatanodeDescriptor node : live) {
       liveInMaintenance += node.isInMaintenance() ? 1 : 0;
@@ -9107,8 +9104,8 @@ public int getNumInMaintenanceLiveDataNodes() {
   @Metric({"NumInMaintenanceDeadDataNodes",
       "Number of dead Datanodes which are in maintenance state"})
   public int getNumInMaintenanceDeadDataNodes() {
-    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-    getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true);
+    final List<DatanodeDescriptor> dead = new ArrayList<>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true, true);
     int deadInMaintenance = 0;
     for (DatanodeDescriptor node : dead) {
       deadInMaintenance += node.isInMaintenance() ? 1 : 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 2b889dd2adc5c..8c496daa4ddc1 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -6712,4 +6712,14 @@
     Enables observer reads for clients. This should only be enabled when clients are using routers.
   </description>
 </property>
+
+<property>
+  <name>dfs.namenode.datanode.list.cache.expiration.ms</name>
+  <value>0</value>
+  <description>
+    Expiration time in milliseconds, measured from insertion, for cached results of
+    DatanodeManager.getDatanodeListForReport. Set to a positive number to enable the cache for
+    performance; 0 or a negative value disables it. Non-metrics callers (fsck, datanodeReport, etc.) bypass this cache.
+  </description>
+</property>
 </configuration>
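Nothing in this diff wires refreshDatanodeListSnapshot(long) into the NameNode's reconfiguration machinery, so presumably a hook elsewhere re-reads the key and forwards the value. A hedged sketch of what such a caller could look like; the class and method names here are hypothetical, not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

    public final class DatanodeListCacheRefresher {
      private DatanodeListCacheRefresher() {
      }

      /** Re-read the expiration key and rebuild, or disable, the snapshot cache. */
      public static void refresh(Configuration conf, DatanodeManager dm) {
        long newExpirationMs = conf.getLong(
            DFSConfigKeys.DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_KEY,
            DFSConfigKeys.DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_DEFAULT);
        dm.refreshDatanodeListSnapshot(newExpirationMs);
      }
    }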
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index af4b60fd3d6b6..0f0f4d9e09624 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -703,7 +703,7 @@ public static int firstDnWithBlock(MiniDFSCluster cluster, ExtendedBlock b)
    */
   public static long getLiveDatanodeCapacity(DatanodeManager dm) {
     final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    dm.fetchDatanodes(live, null, false);
+    dm.fetchDatanodes(live, null, false, false);
     long capacity = 0;
     for (final DatanodeDescriptor dn : live) {
       capacity += dn.getCapacity();
@@ -715,8 +715,8 @@ public static long getLiveDatanodeCapacity(DatanodeManager dm) {
    * Return the capacity of the given live DN.
    */
   public static long getDatanodeCapacity(DatanodeManager dm, int index) {
-    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    dm.fetchDatanodes(live, null, false);
+    final List<DatanodeDescriptor> live = new ArrayList<>();
+    dm.fetchDatanodes(live, null, false, false);
     return live.get(index).getCapacity();
   }
 
@@ -737,7 +737,7 @@ public static void waitForDatanodeStatus(DatanodeManager dm, int expectedLive,
       Thread.sleep(timeout);
       live.clear();
       dead.clear();
-      dm.fetchDatanodes(live, dead, false);
+      dm.fetchDatanodes(live, dead, false, false);
       currTotalCapacity = 0;
       volFails = 0;
       for (final DatanodeDescriptor dd : live) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
index c6c69a4d6e228..115a12504fa9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
@@ -78,7 +78,7 @@ public void testBlocksScheduledCounter() throws IOException {
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
     final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
         ).getDatanodeManager();
-    dm.fetchDatanodes(dnList, dnList, false);
+    dm.fetchDatanodes(dnList, dnList, false, false);
     DatanodeDescriptor dn = dnList.get(0);
 
     assertEquals(1, dn.getBlocksScheduled());
@@ -103,7 +103,7 @@ public void testScheduledBlocksCounterShouldDecrementOnAbandonBlock()
     DatanodeManager datanodeManager = cluster.getNamesystem().getBlockManager()
         .getDatanodeManager();
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
-    datanodeManager.fetchDatanodes(dnList, dnList, false);
+    datanodeManager.fetchDatanodes(dnList, dnList, false, false);
     for (DatanodeDescriptor descriptor : dnList) {
       assertEquals(0, descriptor.getBlocksScheduled(),
           "Blocks scheduled should be 0 for " + descriptor.getName());
@@ -169,7 +169,7 @@ public void testScheduledBlocksCounterDecrementOnDeletedBlock()
         cluster.getNamesystem().getBlockManager().getDatanodeManager();
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
 
-    datanodeManager.fetchDatanodes(dnList, dnList, false);
+    datanodeManager.fetchDatanodes(dnList, dnList, false, false);
 
     // 3. mark a couple of blocks as corrupt
     LocatedBlock block = NameNodeAdapter
@@ -230,7 +230,7 @@ public void testBlocksScheduledCounterOnTruncate() throws Exception {
         cluster.getNamesystem().getBlockManager().getDatanodeManager();
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
 
-    datanodeManager.fetchDatanodes(dnList, dnList, false);
+    datanodeManager.fetchDatanodes(dnList, dnList, false, false);
 
     // 3. restart the stopped datanode
     cluster.restartDataNode(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index dad93c85dd30a..12e79280b9c62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -231,7 +231,7 @@ public void testNullCheckSumWhenDNRestarted()
       // fetch live DN
       final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
       cluster.getNameNode().getNamesystem().getBlockManager()
-          .getDatanodeManager().fetchDatanodes(live, null, false);
+          .getDatanodeManager().fetchDatanodes(live, null, false, false);
       assertTrue(live.size() == 2, "DN start should be success and live dn should be 2");
       assertTrue(fs.getFileStatus(file).getLen() == chunkSize,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 9715083c373b3..f7f3b6c3a8b28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -1503,10 +1503,10 @@ public void testCountOnDecommissionedNodeList() throws IOException{
           ns.getBlockManager().getDatanodeManager();
       List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
       // fetchDatanode with false should return livedecommisioned node
-      datanodeManager.fetchDatanodes(live, null, false);
+      datanodeManager.fetchDatanodes(live, null, false, false);
       assertTrue(1==live.size());
       // fetchDatanode with true should not return livedecommisioned node
-      datanodeManager.fetchDatanodes(live, null, true);
+      datanodeManager.fetchDatanodes(live, null, true, false);
       assertTrue(0==live.size());
     }finally {
       shutdownCluster();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index 8a8361661f089..62df6d131d140 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -505,7 +505,7 @@ public void testBusyAfterDecommissionNode() throws Exception {
     decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSION_INPROGRESS);
 
     final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    bm.getDatanodeManager().fetchDatanodes(live, null, false);
+    bm.getDatanodeManager().fetchDatanodes(live, null, false, false);
     int liveDecommissioning = 0;
     for (DatanodeDescriptor node : live) {
       liveDecommissioning += node.isDecommissionInProgress() ? 1 : 0;
@@ -519,7 +519,7 @@ public void testBusyAfterDecommissionNode() throws Exception {
     int blocksScheduled = 0;
     final List<DatanodeDescriptor> dnList = new ArrayList<>();
     fsn.getBlockManager().getDatanodeManager().fetchDatanodes(dnList, null,
-        false);
+        false, false);
     for (DatanodeDescriptor dn : dnList) {
       blocksScheduled += dn.getBlocksScheduled();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index 36057f7bcf19b..804b04ceb8513 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -594,7 +594,7 @@ public void testPendingReConstructionBlocksForSameDN() throws Exception {
         cluster.getNamesystem().getBlockManager().getDatanodeManager();
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
 
-    datanodeManager.fetchDatanodes(dnList, dnList, false);
+    datanodeManager.fetchDatanodes(dnList, dnList, false, false);
 
     LocatedBlock block = NameNodeAdapter
         .getBlockLocations(cluster.getNameNode(), filePath.toString(), 0, 1)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index a1fdcb4afeced..11aea331c4978 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -112,7 +112,7 @@ public void testBlockScheduledUpdate() throws Exception {
       // make sure the scheduled block size has been updated for each DN storage
       // in NN
       final List<DatanodeDescriptor> dnList = new ArrayList<>();
-      fsn.getBlockManager().getDatanodeManager().fetchDatanodes(dnList, null, false);
+      fsn.getBlockManager().getDatanodeManager().fetchDatanodes(dnList, null, false, false);
       for (DatanodeDescriptor dn : dnList) {
         assertEquals(1, dn.getBlocksScheduled());
       }
@@ -125,7 +125,7 @@ public void testBlockScheduledUpdate() throws Exception {
 
       // check the scheduled block size again
       final List<DatanodeDescriptor> dnList = new ArrayList<>();
-      fsn.getBlockManager().getDatanodeManager().fetchDatanodes(dnList, null, false);
+      fsn.getBlockManager().getDatanodeManager().fetchDatanodes(dnList, null, false, false);
       for (DatanodeDescriptor dn : dnList) {
         assertEquals(0, dn.getBlocksScheduled());
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 008b2b0498e2f..cc80ebf927025 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -422,7 +422,7 @@ public void testDecommissionStatusAfterDNRestart() throws Exception {
     DataNodeProperties dataNodeProperties = cluster.stopDataNode(dnName);
     final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
     while (true) {
-      dm.fetchDatanodes(null, dead, false);
+      dm.fetchDatanodes(null, dead, false, false);
       if (dead.size() == 1) {
         break;
       }
     }
break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java index 36f3b4d51f090..cf91409e410b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java @@ -79,7 +79,7 @@ public void testVolumeSize() throws Exception { // Ensure the data reported for each data node is right final List live = new ArrayList(); final List dead = new ArrayList(); - dm.fetchDatanodes(live, dead, false); + dm.fetchDatanodes(live, dead, false, false); assertTrue(live.size() == 1);