Skip to content

Commit 8dd731f

Browse files
committed
HDFS-17878. Reduce frequency of getDatanodeListForReport calls for metrics
1 parent 90150af commit 8dd731f

File tree

14 files changed

+132
-55
lines changed

14 files changed

+132
-55
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2110,4 +2110,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
21102110
public static final long DFS_LEASE_HARDLIMIT_DEFAULT =
21112111
HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT;
21122112

2113+
public static final String DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_KEY =
2114+
"dfs.namenode.datanode.list.cache.expiration.ms";
2115+
public static final long DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_DEFAULT = 0;
21132116
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -862,9 +862,9 @@ public void refreshBlockPlacementPolicy(Configuration conf) {
862862
/** Dump meta data to out. */
863863
public void metaSave(PrintWriter out) {
864864
assert namesystem.hasReadLock(RwLockMode.BM);
865-
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
866-
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
867-
datanodeManager.fetchDatanodes(live, dead, false);
865+
final List<DatanodeDescriptor> live = new ArrayList<>();
866+
final List<DatanodeDescriptor> dead = new ArrayList<>();
867+
datanodeManager.fetchDatanodes(live, dead, false, false);
868868
out.println("Live Datanodes: " + live.size());
869869
out.println("Dead Datanodes: " + dead.size());
870870

@@ -1722,6 +1722,9 @@ public void verifyReplication(String src,
17221722
public boolean isSufficientlyReplicated(BlockInfo b) {
17231723
// Compare against the lesser of the minReplication and number of live DNs.
17241724
final int liveReplicas = countNodes(b).liveReplicas();
1725+
if (liveReplicas == 0) {
1726+
return false;
1727+
}
17251728
if (hasMinStorage(b, liveReplicas)) {
17261729
return true;
17271730
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

Lines changed: 72 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,8 @@
2424
import org.apache.hadoop.classification.VisibleForTesting;
2525
import org.apache.hadoop.util.Preconditions;
2626

27+
import org.apache.hadoop.thirdparty.com.google.common.cache.Cache;
28+
import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder;
2729
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
2830
import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
2931

@@ -70,6 +72,7 @@
7072
import java.net.InetSocketAddress;
7173
import java.net.UnknownHostException;
7274
import java.util.*;
75+
import java.util.concurrent.ExecutionException;
7376
import java.util.concurrent.ThreadLocalRandom;
7477
import java.util.concurrent.TimeUnit;
7578
import java.util.function.Consumer;
@@ -230,6 +233,9 @@ public class DatanodeManager {
230233

231234
private final boolean randomNodeOrderEnabled;
232235

236+
/** Cached map of DatanodeReportType -> list of DatanodeDescriptor for metrics purposes. */
237+
private volatile Cache<DatanodeReportType, List<DatanodeDescriptor>> datanodeListSnapshots = null;
238+
233239
DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
234240
final Configuration conf) throws IOException {
235241
this.namesystem = namesystem;
@@ -364,6 +370,17 @@ public class DatanodeManager {
364370
this.randomNodeOrderEnabled = conf.getBoolean(
365371
DFSConfigKeys.DFS_NAMENODE_RANDOM_NODE_ORDER_ENABLED,
366372
DFSConfigKeys.DFS_NAMENODE_RANDOM_NODE_ORDER_ENABLED_DEFAULT);
373+
374+
long datanodeListCacheExpirationMs = conf.getLong(DFSConfigKeys.DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_KEY,
375+
DFSConfigKeys.DFS_NAMENODE_DATANODE_LIST_CACHE_EXPIRATION_MS_DEFAULT);
376+
if (datanodeListCacheExpirationMs > 0) {
377+
LOG.info("Using cached DN list for metrics, expiration time = {} ms.",
378+
datanodeListCacheExpirationMs);
379+
datanodeListSnapshots = CacheBuilder.newBuilder()
380+
.expireAfterWrite(datanodeListCacheExpirationMs, TimeUnit.MILLISECONDS)
381+
.build();
382+
}
383+
367384
}
368385

369386
/**
@@ -945,6 +962,12 @@ void addDatanode(final DatanodeDescriptor node) {
945962
synchronized(this) {
946963
host2DatanodeMap.remove(datanodeMap.put(node.getDatanodeUuid(), node));
947964
}
965+
// Nuke the metrics cache
966+
Cache<DatanodeReportType, List<DatanodeDescriptor>> tmpDatanodeListSnapshots =
967+
datanodeListSnapshots;
968+
if (tmpDatanodeListSnapshots != null) {
969+
tmpDatanodeListSnapshots.invalidateAll();
970+
}
948971

949972
networktopology.add(node); // may throw InvalidTopologyException
950973
host2DatanodeMap.add(node);
@@ -963,6 +986,12 @@ private void wipeDatanode(final DatanodeID node) {
963986
synchronized (this) {
964987
host2DatanodeMap.remove(datanodeMap.remove(key));
965988
}
989+
// Nuke the metrics cache
990+
Cache<DatanodeReportType, List<DatanodeDescriptor>> tmpDatanodeListSnapshots =
991+
datanodeListSnapshots;
992+
if (tmpDatanodeListSnapshots != null) {
993+
tmpDatanodeListSnapshots.invalidateAll();
994+
}
966995
if (LOG.isDebugEnabled()) {
967996
LOG.debug("{}.wipeDatanode({}): storage {} is removed from datanodeMap.",
968997
getClass().getSimpleName(), node, key);
@@ -1438,7 +1467,7 @@ public int getNumLiveDataNodes() {
14381467

14391468
/** @return the number of dead datanodes. */
14401469
public int getNumDeadDataNodes() {
1441-
return getDatanodeListForReport(DatanodeReportType.DEAD).size();
1470+
return getDatanodeListSnapshotForReport(DatanodeReportType.DEAD).size();
14421471
}
14431472

14441473
/** @return the number of datanodes. */
@@ -1453,12 +1482,12 @@ public List<DatanodeDescriptor> getDecommissioningNodes() {
14531482
// There is no need to take namesystem reader lock as
14541483
// getDatanodeListForReport will synchronize on datanodeMap
14551484
// A decommissioning DN may be "alive" or "dead".
1456-
return getDatanodeListForReport(DatanodeReportType.DECOMMISSIONING);
1485+
return getDatanodeListSnapshotForReport(DatanodeReportType.DECOMMISSIONING);
14571486
}
14581487

14591488
/** @return list of datanodes that are entering maintenance. */
14601489
public List<DatanodeDescriptor> getEnteringMaintenanceNodes() {
1461-
return getDatanodeListForReport(DatanodeReportType.ENTERING_MAINTENANCE);
1490+
return getDatanodeListSnapshotForReport(DatanodeReportType.ENTERING_MAINTENANCE);
14621491
}
14631492

14641493
/* Getter and Setter for stale DataNodes related attributes */
@@ -1534,15 +1563,19 @@ void setNumStaleStorages(int numStaleStorages) {
15341563

15351564
/** Fetch live and dead datanodes. */
15361565
public void fetchDatanodes(final List<DatanodeDescriptor> live,
1537-
final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {
1566+
final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode, boolean useCache) {
15381567
if (live == null && dead == null) {
15391568
throw new HadoopIllegalArgumentException("Both live and dead lists are null");
15401569
}
15411570

1542-
// There is no need to take namesystem reader lock as
1543-
// getDatanodeListForReport will synchronize on datanodeMap
1544-
final List<DatanodeDescriptor> results =
1545-
getDatanodeListForReport(DatanodeReportType.ALL);
1571+
List<DatanodeDescriptor> results;
1572+
if (useCache) {
1573+
results = getDatanodeListSnapshotForReport(DatanodeReportType.ALL);
1574+
} else {
1575+
// There is no need to take namesystem reader lock as
1576+
// getDatanodeListForReport will synchronize on datanodeMap
1577+
results = getDatanodeListForReport(DatanodeReportType.ALL);
1578+
}
15461579
for(DatanodeDescriptor node : results) {
15471580
if (isDatanodeDead(node)) {
15481581
if (dead != null) {
@@ -1635,6 +1668,37 @@ private DatanodeID parseDNFromHostsEntry(String hostLine) {
16351668
return dnId;
16361669
}
16371670

1671+
public void refreshDatanodeListSnapshot(long newExpirationMs) {
1672+
if (newExpirationMs <= 0) {
1673+
LOG.info("New config is non-positive ({}), disabling DN list cache", newExpirationMs);
1674+
datanodeListSnapshots = null;
1675+
} else {
1676+
LOG.info("Resetting DN list cache with new expiration time {}ms", newExpirationMs);
1677+
datanodeListSnapshots = CacheBuilder.newBuilder()
1678+
.expireAfterWrite(newExpirationMs, TimeUnit.MILLISECONDS)
1679+
.build();
1680+
}
1681+
}
1682+
1683+
/**
1684+
* Low impact version of {@link #getDatanodeListForReport} with possible stale
1685+
* data; intended for low-impact callers such as metrics.
1686+
*/
1687+
public List<DatanodeDescriptor> getDatanodeListSnapshotForReport(
1688+
final DatanodeReportType type) {
1689+
Cache<DatanodeReportType, List<DatanodeDescriptor>> tmpDatanodeListSnapshots =
1690+
datanodeListSnapshots;
1691+
if (tmpDatanodeListSnapshots == null) {
1692+
return getDatanodeListForReport(type);
1693+
}
1694+
try {
1695+
return tmpDatanodeListSnapshots.get(type, () -> getDatanodeListForReport(type));
1696+
} catch (ExecutionException e) {
1697+
// Fallback if cache fails
1698+
return getDatanodeListForReport(type);
1699+
}
1700+
}
1701+
16381702
/** For generating datanode reports */
16391703
public List<DatanodeDescriptor> getDatanodeListForReport(
16401704
final DatanodeReportType type) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Lines changed: 23 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -5788,8 +5788,8 @@ public int getNumDeadDataNodes() {
57885788
@Metric({"NumDecomLiveDataNodes",
57895789
"Number of datanodes which have been decommissioned and are now live"})
57905790
public int getNumDecomLiveDataNodes() {
5791-
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
5792-
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
5791+
final List<DatanodeDescriptor> live = new ArrayList<>();
5792+
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false, true);
57935793
int liveDecommissioned = 0;
57945794
for (DatanodeDescriptor node : live) {
57955795
liveDecommissioned += node.isDecommissioned() ? 1 : 0;
@@ -5801,8 +5801,8 @@ public int getNumDecomLiveDataNodes() {
58015801
@Metric({"NumDecomDeadDataNodes",
58025802
"Number of datanodes which have been decommissioned and are now dead"})
58035803
public int getNumDecomDeadDataNodes() {
5804-
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
5805-
getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, false);
5804+
final List<DatanodeDescriptor> dead = new ArrayList<>();
5805+
getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, false, true);
58065806
int deadDecommissioned = 0;
58075807
for (DatanodeDescriptor node : dead) {
58085808
deadDecommissioned += node.isDecommissioned() ? 1 : 0;
@@ -5814,8 +5814,8 @@ public int getNumDecomDeadDataNodes() {
58145814
@Metric({"NumInServiceLiveDataNodes",
58155815
"Number of live datanodes which are currently in service"})
58165816
public int getNumInServiceLiveDataNodes() {
5817-
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
5818-
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
5817+
final List<DatanodeDescriptor> live = new ArrayList<>();
5818+
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true, true);
58195819
int liveInService = live.size();
58205820
for (DatanodeDescriptor node : live) {
58215821
liveInService -= node.isInMaintenance() ? 1 : 0;
@@ -5827,8 +5827,8 @@ public int getNumInServiceLiveDataNodes() {
58275827
@Metric({"VolumeFailuresTotal",
58285828
"Total number of volume failures across all Datanodes"})
58295829
public int getVolumeFailuresTotal() {
5830-
List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
5831-
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
5830+
List<DatanodeDescriptor> live = new ArrayList<>();
5831+
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false, true);
58325832
int volumeFailuresTotal = 0;
58335833
for (DatanodeDescriptor node: live) {
58345834
volumeFailuresTotal += node.getVolumeFailures();
@@ -5840,8 +5840,8 @@ public int getVolumeFailuresTotal() {
58405840
@Metric({"EstimatedCapacityLostTotal",
58415841
"An estimate of the total capacity lost due to volume failures"})
58425842
public long getEstimatedCapacityLostTotal() {
5843-
List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
5844-
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
5843+
List<DatanodeDescriptor> live = new ArrayList<>();
5844+
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false, true);
58455845
long estimatedCapacityLostTotal = 0;
58465846
for (DatanodeDescriptor node: live) {
58475847
VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary();
@@ -6730,10 +6730,9 @@ public int getThreads() {
67306730
*/
67316731
@Override // NameNodeMXBean
67326732
public String getLiveNodes() {
6733-
final Map<String, Map<String,Object>> info =
6734-
new HashMap<String, Map<String,Object>>();
6735-
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
6736-
blockManager.getDatanodeManager().fetchDatanodes(live, null, false);
6733+
final Map<String, Map<String,Object>> info = new HashMap<>();
6734+
final List<DatanodeDescriptor> live = new ArrayList<>();
6735+
blockManager.getDatanodeManager().fetchDatanodes(live, null, false, true);
67376736
for (DatanodeDescriptor node : live) {
67386737
ImmutableMap.Builder<String, Object> innerinfo =
67396738
ImmutableMap.<String,Object>builder();
@@ -6785,10 +6784,9 @@ public String getLiveNodes() {
67856784
*/
67866785
@Override // NameNodeMXBean
67876786
public String getDeadNodes() {
6788-
final Map<String, Map<String, Object>> info =
6789-
new HashMap<String, Map<String, Object>>();
6790-
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
6791-
blockManager.getDatanodeManager().fetchDatanodes(null, dead, false);
6787+
final Map<String, Map<String, Object>> info = new HashMap<>();
6788+
final List<DatanodeDescriptor> dead = new ArrayList<>();
6789+
blockManager.getDatanodeManager().fetchDatanodes(null, dead, false, true);
67926790
for (DatanodeDescriptor node : dead) {
67936791
Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
67946792
.put("lastContact", getLastContact(node))
@@ -6916,10 +6914,9 @@ public String getNodeUsage() {
69166914
float min = 0;
69176915
float dev = 0;
69186916

6919-
final Map<String, Map<String,Object>> info =
6920-
new HashMap<String, Map<String,Object>>();
6921-
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
6922-
blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
6917+
final Map<String, Map<String,Object>> info = new HashMap<>();
6918+
final List<DatanodeDescriptor> live = new ArrayList<>();
6919+
blockManager.getDatanodeManager().fetchDatanodes(live, null, true, true);
69236920
for (Iterator<DatanodeDescriptor> it = live.iterator(); it.hasNext();) {
69246921
DatanodeDescriptor node = it.next();
69256922
if (!node.isInService()) {
@@ -9094,8 +9091,8 @@ public long getBytesInFuture() {
90949091
@Metric({"NumInMaintenanceLiveDataNodes",
90959092
"Number of live Datanodes which are in maintenance state"})
90969093
public int getNumInMaintenanceLiveDataNodes() {
9097-
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
9098-
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true);
9094+
final List<DatanodeDescriptor> live = new ArrayList<>();
9095+
getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true, true);
90999096
int liveInMaintenance = 0;
91009097
for (DatanodeDescriptor node : live) {
91019098
liveInMaintenance += node.isInMaintenance() ? 1 : 0;
@@ -9107,8 +9104,8 @@ public int getNumInMaintenanceLiveDataNodes() {
91079104
@Metric({"NumInMaintenanceDeadDataNodes",
91089105
"Number of dead Datanodes which are in maintenance state"})
91099106
public int getNumInMaintenanceDeadDataNodes() {
9110-
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
9111-
getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true);
9107+
final List<DatanodeDescriptor> dead = new ArrayList<>();
9108+
getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true, true);
91129109
int deadInMaintenance = 0;
91139110
for (DatanodeDescriptor node : dead) {
91149111
deadInMaintenance += node.isInMaintenance() ? 1 : 0;

hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6712,4 +6712,14 @@
67126712
Enables observer reads for clients. This should only be enabled when clients are using routers.
67136713
</description>
67146714
</property>
6715+
<property>
6716+
<name>dfs.namenode.datanode.list.cache.expiration.ms</name>
6717+
<value>0</value>
6718+
<description>
6719+
Set to a positive number to cache values for DatanodeManager.getDatanodeListForReport for
6720+
performance purposes. Milliseconds for cache expiration from insertion. 0 or negative value
6721+
to disable this cache.
6722+
Non-metrics usage (fsck, datanodeReport, etc.) will bypass this cache.
6723+
</description>
6724+
</property>
67156725
</configuration>

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -703,7 +703,7 @@ public static int firstDnWithBlock(MiniDFSCluster cluster, ExtendedBlock b)
703703
*/
704704
public static long getLiveDatanodeCapacity(DatanodeManager dm) {
705705
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
706-
dm.fetchDatanodes(live, null, false);
706+
dm.fetchDatanodes(live, null, false, false);
707707
long capacity = 0;
708708
for (final DatanodeDescriptor dn : live) {
709709
capacity += dn.getCapacity();
@@ -715,8 +715,8 @@ public static long getLiveDatanodeCapacity(DatanodeManager dm) {
715715
* Return the capacity of the given live DN.
716716
*/
717717
public static long getDatanodeCapacity(DatanodeManager dm, int index) {
718-
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
719-
dm.fetchDatanodes(live, null, false);
718+
final List<DatanodeDescriptor> live = new ArrayList<>();
719+
dm.fetchDatanodes(live, null, false, false);
720720
return live.get(index).getCapacity();
721721
}
722722

@@ -737,7 +737,7 @@ public static void waitForDatanodeStatus(DatanodeManager dm, int expectedLive,
737737
Thread.sleep(timeout);
738738
live.clear();
739739
dead.clear();
740-
dm.fetchDatanodes(live, dead, false);
740+
dm.fetchDatanodes(live, dead, false, false);
741741
currTotalCapacity = 0;
742742
volFails = 0;
743743
for (final DatanodeDescriptor dd : live) {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ public void testBlocksScheduledCounter() throws IOException {
7878
ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
7979
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
8080
).getDatanodeManager();
81-
dm.fetchDatanodes(dnList, dnList, false);
81+
dm.fetchDatanodes(dnList, dnList, false, false);
8282
DatanodeDescriptor dn = dnList.get(0);
8383

8484
assertEquals(1, dn.getBlocksScheduled());
@@ -103,7 +103,7 @@ public void testScheduledBlocksCounterShouldDecrementOnAbandonBlock()
103103
DatanodeManager datanodeManager = cluster.getNamesystem().getBlockManager()
104104
.getDatanodeManager();
105105
ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
106-
datanodeManager.fetchDatanodes(dnList, dnList, false);
106+
datanodeManager.fetchDatanodes(dnList, dnList, false, false);
107107
for (DatanodeDescriptor descriptor : dnList) {
108108
assertEquals(0, descriptor.getBlocksScheduled(),
109109
"Blocks scheduled should be 0 for " + descriptor.getName());
@@ -169,7 +169,7 @@ public void testScheduledBlocksCounterDecrementOnDeletedBlock()
169169
cluster.getNamesystem().getBlockManager().getDatanodeManager();
170170
ArrayList<DatanodeDescriptor> dnList =
171171
new ArrayList<DatanodeDescriptor>();
172-
datanodeManager.fetchDatanodes(dnList, dnList, false);
172+
datanodeManager.fetchDatanodes(dnList, dnList, false, false);
173173

174174
// 3. mark a couple of blocks as corrupt
175175
LocatedBlock block = NameNodeAdapter
@@ -230,7 +230,7 @@ public void testBlocksScheduledCounterOnTruncate() throws Exception {
230230
cluster.getNamesystem().getBlockManager().getDatanodeManager();
231231
ArrayList<DatanodeDescriptor> dnList =
232232
new ArrayList<DatanodeDescriptor>();
233-
datanodeManager.fetchDatanodes(dnList, dnList, false);
233+
datanodeManager.fetchDatanodes(dnList, dnList, false, false);
234234

235235
// 3. restart the stopped datanode
236236
cluster.restartDataNode(0);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -231,7 +231,7 @@ public void testNullCheckSumWhenDNRestarted()
231231
// fetch live DN
232232
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
233233
cluster.getNameNode().getNamesystem().getBlockManager()
234-
.getDatanodeManager().fetchDatanodes(live, null, false);
234+
.getDatanodeManager().fetchDatanodes(live, null, false, false);
235235
assertTrue(live.size() == 2,
236236
"DN start should be success and live dn should be 2");
237237
assertTrue(fs.getFileStatus(file).getLen() == chunkSize,

0 commit comments

Comments
 (0)