@@ -2110,4 +2110,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final long DFS_LEASE_HARDLIMIT_DEFAULT =
HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT;

/**
* List of packages from which Step implementations will be loaded when deserializing
* Node Plans: {@value}.
*/
public static final String SUPPORTED_PACKAGES_CONFIG_NAME =
"dfs.nodeplan.steps.supported.packages";
}
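
A quick sketch, not part of the patch, of how a caller might read the new key; it mirrors what NodePlan.getAllowedPackages() in this PR does (the class name ReadSupportedPackages is invented for illustration):

  import java.util.Collection;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.HdfsConfiguration;

  public class ReadSupportedPackages {
    public static void main(String[] args) {
      Configuration conf = new HdfsConfiguration();
      // Comma-separated values come back as raw tokens (whitespace intact).
      Collection<String> pkgs =
          conf.getStringCollection(DFSConfigKeys.SUPPORTED_PACKAGES_CONFIG_NAME);
      pkgs.forEach(System.out::println);
    }
  }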
@@ -217,7 +217,7 @@ public void setMaxDiskErrors(long maxDiskErrors) {
*
* For example : if the ideal amount on each disk was 1 TB and the
* tolerance was 10%, then getting to 900 GB on the destination disk is
* considerd good enough.
* considered good enough.
*
* @return tolerance percentage.
*/
@@ -18,14 +18,22 @@
package org.apache.hadoop.hdfs.server.diskbalancer.planner;

import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.util.Preconditions;

import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import static org.apache.hadoop.hdfs.DFSConfigKeys.SUPPORTED_PACKAGES_CONFIG_NAME;

/**
* NodePlan is a set of volumeSetPlans.
@@ -43,6 +51,9 @@ public class NodePlan {
private static final ObjectReader READER = MAPPER.readerFor(NodePlan.class);
private static final ObjectWriter WRITER = MAPPER.writerFor(
MAPPER.constructType(NodePlan.class));
private static final Configuration CONFIGURATION = new HdfsConfiguration();
private static final Collection<String> SUPPORTED_PACKAGES = getAllowedPackages();

/**
* returns timestamp when this plan was created.
*
@@ -80,7 +91,7 @@ public NodePlan(String datanodeName, int rpcPort) {
/**
* Returns a Map of VolumeSetIDs and volumeSetPlans.
*
* @return Map
* @return List of Steps
*/
public List<Step> getVolumeSetPlans() {
return volumeSetPlans;
@@ -151,20 +162,55 @@ public void setPort(int port) {
}

/**
* Parses a Json string and converts to NodePlan.
* Parses a JSON string and converts to NodePlan.
*
* @param json - Json String
* @param json - JSON String
* @return NodePlan
* @throws IOException
*/
public static NodePlan parseJson(String json) throws IOException {
return READER.readValue(json);
JsonNode tree = READER.readTree(json);
checkNodes(tree);
return READER.readValue(tree);
}

/**
* Returns a Json representation of NodePlan.
* Iterates through the tree structure beginning at the input `node`, descending
* into arrays and the fields of JSON objects (allowing for nested structures).
*
* @return - json String
* @param node the root of the tree structure to check
* @throws IOException if any unexpected `@class` values are found; this is the
* pre-existing exception type exposed by the calling code
*/
private static void checkNodes(JsonNode node) throws IOException {
if (node == null) {
return;
}

// Check this node, then recurse into its children.
if (node.isObject()) {
Iterator<Map.Entry<String, JsonNode>> fieldsIterator = node.fields();
while (fieldsIterator.hasNext()) {
Map.Entry<String, JsonNode> entry = fieldsIterator.next();
if ("@class".equals(entry.getKey())) {
String textValue = entry.getValue().asText();
if (textValue != null && !textValue.isBlank() && !stepClassIsAllowed(textValue)) {
throw new IOException("Invalid @class value in NodePlan JSON: " + textValue);
}
}
checkNodes(entry.getValue());
}
} else if (node.isArray()) {
for (int i = 0; i < node.size(); i++) {
checkNodes(node.get(i));
}
}
}

/**
* Returns a JSON representation of NodePlan.
*
* @return - JSON String
* @throws IOException
*/
public String toJson() throws IOException {
@@ -188,4 +234,21 @@ public String getNodeUUID() {
public void setNodeUUID(String nodeUUID) {
this.nodeUUID = nodeUUID;
}

private static boolean stepClassIsAllowed(String className) {
for (String pkg : SUPPORTED_PACKAGES) {
if (className.startsWith(pkg)) {
return true;
}
}
return false;
}

private static Collection<String> getAllowedPackages() {
return CONFIGURATION.getStringCollection(SUPPORTED_PACKAGES_CONFIG_NAME)
.stream()
.map(String::trim)
A contributor commented on the .map(String::trim) line:
needless, they get trimmed already. Do leave that filter in though as I'm not sure what happens there

The PR author (Member) replied:
StringUtils.getStringCollection uses the legacy and discouraged java.util.StringTokenizer.
This StringTokenizer does not trim values - it only omits the delimiter so stray whitespace after a comma, for instance, will be returned.

  public static void main(String[] args) {
    String text = "a, b";
    java.util.StringTokenizer tokenizer = new java.util.StringTokenizer(text, ",");
    while (tokenizer.hasMoreTokens()) {
      // Prints "<a>" then "< b>": the space after the comma is preserved.
      System.out.println("<" + tokenizer.nextToken() + ">");
    }
  }

.filter(s -> !s.isEmpty())
.toList();
}
}
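
To see what the new check buys, here is a minimal sketch (not part of the patch) of the kind of payload parseJson now rejects. The class name evil.Gadget is a placeholder, and the volumeSetPlans field name is taken from the getter shown above; since checkNodes walks the parsed tree before Jackson instantiates anything, the disallowed type is never loaded:

  import java.io.IOException;

  import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;

  public class ParseJsonDemo {
    public static void main(String[] args) {
      // "evil.Gadget" stands in for any class outside the allowed packages.
      String hostile = "{\"volumeSetPlans\":[{\"@class\":\"evil.Gadget\"}]}";
      try {
        NodePlan.parseJson(hostile);
      } catch (IOException e) {
        // Expected: Invalid @class value in NodePlan JSON: evil.Gadget
        System.out.println(e.getMessage());
      }
    }
  }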
@@ -66,7 +66,7 @@ public interface Step {
String getSizeString(long size);

/**
* Returns maximum number of disk erros tolerated.
* Returns maximum number of disk errors tolerated.
* @return long.
*/
long getMaxDiskErrors();
@@ -6712,4 +6712,12 @@
Enables observer reads for clients. This should only be enabled when clients are using routers.
</description>
</property>
<property>
<name>dfs.nodeplan.steps.supported.packages</name>
<value>org.apache.hadoop.hdfs.server.diskbalancer.planner</value>
<description>
Only Step implementations in the named packages will be accepted when
deserializing Node Plans. To allow more than one package, separate the
package names with commas.
</description>
</property>
</configuration>
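
Where a deployment ships its own Step implementations, the value accepts a comma-separated list of packages. A hypothetical hdfs-site.xml override (the second package name is invented for illustration) could look like:

  <property>
    <name>dfs.nodeplan.steps.supported.packages</name>
    <value>org.apache.hadoop.hdfs.server.diskbalancer.planner,com.example.diskbalancer.steps</value>
  </property>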
@@ -0,0 +1,171 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.diskbalancer.planner;

import java.io.IOException;

import org.junit.jupiter.api.Test;
import sample.SampleStep;

import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
import org.apache.hadoop.test.LambdaTestUtils;

import static org.assertj.core.api.Assertions.assertThat;

public class TestNodePlan {

@Test
public void testNodePlan() throws IOException {
NodePlan nodePlan = new NodePlan("datanode1234", 1234);
MoveStep moveStep = new MoveStep();
moveStep.setBandwidth(12345);
moveStep.setBytesToMove(98765);
moveStep.setIdealStorage(1.234);
moveStep.setMaxDiskErrors(4567);
moveStep.setVolumeSetID("id1234");
nodePlan.addStep(moveStep);
String json = nodePlan.toJson();
assertThat(NodePlan.parseJson(json)).isNotNull();
}

@Test
public void testNodePlanWithDisallowedStep() throws Exception {
NodePlan nodePlan = new NodePlan("datanode1234", 1234);
Step sampleStep = new SampleStep();
sampleStep.setBandwidth(12345);
sampleStep.setMaxDiskErrors(4567);
nodePlan.addStep(sampleStep);
assertNodePlanInvalid(nodePlan);
}

@Test
public void testNodePlanWithSecondStepDisallowed() throws Exception {
NodePlan nodePlan = new NodePlan("datanode1234", 1234);
MoveStep moveStep = new MoveStep();
moveStep.setBandwidth(12345);
moveStep.setBytesToMove(98765);
moveStep.setIdealStorage(1.234);
moveStep.setMaxDiskErrors(4567);
moveStep.setVolumeSetID("id1234");
nodePlan.addStep(moveStep);
Step sampleStep = new SampleStep();
sampleStep.setBandwidth(12345);
sampleStep.setMaxDiskErrors(4567);
nodePlan.addStep(sampleStep);
assertNodePlanInvalid(nodePlan);
}

@Test
public void testNodePlanWithNestedDisallowedStep() throws Exception {
NodePlan nodePlan = new NodePlan("datanode1234", 1234);
NodePlan nodePlan2 = new NodePlan("datanode9876", 9876);
SampleStep sampleStep = new SampleStep();
sampleStep.setBandwidth(12345);
sampleStep.setMaxDiskErrors(4567);
nodePlan2.addStep(sampleStep);
NestedStep nestedStep = new NestedStep(nodePlan2);
nestedStep.setBandwidth(1234);
nestedStep.setMaxDiskErrors(456);
nodePlan.addStep(nestedStep);
assertNodePlanInvalid(nodePlan);
}

private void assertNodePlanInvalid(final NodePlan nodePlan) throws Exception {
LambdaTestUtils.intercept(
IOException.class,
"Invalid @class value in NodePlan JSON: sample.SampleStep",
() -> NodePlan.parseJson(nodePlan.toJson()));
}

private static class NestedStep implements Step {
@JsonProperty
private NodePlan nodePlan;

NestedStep() {
// needed to make Jackson deserialization easier
}

NestedStep(NodePlan nodePlan) {
this.nodePlan = nodePlan;
}

NodePlan getNodePlan() {
return nodePlan;
}

@Override
public long getBytesToMove() {
return 0;
}

@Override
public DiskBalancerVolume getDestinationVolume() {
return null;
}

@Override
public double getIdealStorage() {
return 0;
}

@Override
public DiskBalancerVolume getSourceVolume() {
return null;
}

@Override
public String getVolumeSetID() {
return "";
}

@Override
public String getSizeString(long size) {
return "";
}

@Override
public long getMaxDiskErrors() {
return 0;
}

@Override
public long getTolerancePercent() {
return 0;
}

@Override
public long getBandwidth() {
return 0;
}

@Override
public void setTolerancePercent(long tolerancePercent) {

}

@Override
public void setBandwidth(long bandwidth) {

}

@Override
public void setMaxDiskErrors(long maxDiskErrors) {

}
}
}
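
The tests above depend on a sample.SampleStep fixture that is not part of this diff. A minimal sketch of what it might look like, assuming it simply extends MoveStep from a package deliberately left off the allow-list:

  package sample;

  import org.apache.hadoop.hdfs.server.diskbalancer.planner.MoveStep;

  // A Step implementation outside the allowed packages; NodePlan.parseJson
  // must reject any plan containing it.
  public class SampleStep extends MoveStep {
  }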