diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index df9e3907bdaf2..96226f45f6a48 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -2110,4 +2110,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long DFS_LEASE_HARDLIMIT_DEFAULT =
       HdfsClientConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT;
 
+  /**
+   * List of packages from which Step implementations will be loaded when deserializing
+   * Node Plans: {@value}.
+   */
+  public static final String SUPPORTED_PACKAGES_CONFIG_NAME =
+      "dfs.nodeplan.steps.supported.packages";
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/MoveStep.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/MoveStep.java
index 7f2f954af03f0..4d836a478cde9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/MoveStep.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/MoveStep.java
@@ -217,7 +217,7 @@ public void setMaxDiskErrors(long maxDiskErrors) {
    *
    * For example : if the ideal amount on each disk was 1 TB and the
    * tolerance was 10%, then getting to 900 GB on the destination disk is
-   * considerd good enough.
+   * considered good enough.
    *
    * @return tolerance percentage.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java
index 39a7c57bca2cd..127456bd7e3f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java
@@ -18,14 +18,22 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.planner;
 
 import com.fasterxml.jackson.annotation.JsonTypeInfo;
+import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
 import com.fasterxml.jackson.databind.ObjectWriter;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.util.Preconditions;
 
 import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.SUPPORTED_PACKAGES_CONFIG_NAME;
 
 /**
  * NodePlan is a set of volumeSetPlans.
@@ -43,6 +51,9 @@ public class NodePlan {
   private static final ObjectReader READER = MAPPER.readerFor(NodePlan.class);
   private static final ObjectWriter WRITER = MAPPER.writerFor(
       MAPPER.constructType(NodePlan.class));
+  private static final Configuration CONFIGURATION = new HdfsConfiguration();
+  private static final Collection<String> SUPPORTED_PACKAGES = getAllowedPackages();
+
   /**
    * returns timestamp when this plan was created.
    *
@@ -80,7 +91,7 @@ public NodePlan(String datanodeName, int rpcPort) {
   /**
    * Returns a Map of VolumeSetIDs and volumeSetPlans.
    *
-   * @return Map
+   * @return List of Steps
    */
   public List<Step> getVolumeSetPlans() {
     return volumeSetPlans;
   }
@@ -151,20 +162,55 @@ public void setPort(int port) {
   }
 
   /**
-   * Parses a Json string and converts to NodePlan.
+   * Parses a JSON string and converts to NodePlan.
    *
-   * @param json - Json String
+   * @param json - JSON String
    * @return NodePlan
    * @throws IOException
    */
   public static NodePlan parseJson(String json) throws IOException {
-    return READER.readValue(json);
+    JsonNode tree = READER.readTree(json);
+    checkNodes(tree);
+    return READER.readValue(tree);
   }
 
   /**
-   * Returns a Json representation of NodePlan.
+   * Iterate through the tree structure beginning at the input `node`. This includes
+   * checking arrays and nested JSON object structures.
    *
-   * @return - json String
+   * @param node a node representing the root of the tree structure
+   * @throws IOException if any unexpected `@class` values are found - this is the
+   *                     pre-existing exception type exposed by the calling code
+   */
+  private static void checkNodes(JsonNode node) throws IOException {
+    if (node == null) {
+      return;
+    }
+
+    // Check this node and recurse into child nodes.
+    if (node.isObject()) {
+      Iterator<Map.Entry<String, JsonNode>> fieldsIterator = node.fields();
+      while (fieldsIterator.hasNext()) {
+        Map.Entry<String, JsonNode> entry = fieldsIterator.next();
+        if ("@class".equals(entry.getKey())) {
+          String textValue = entry.getValue().asText();
+          if (textValue != null && !textValue.isBlank() && !stepClassIsAllowed(textValue)) {
+            throw new IOException("Invalid @class value in NodePlan JSON: " + textValue);
+          }
+        }
+        checkNodes(entry.getValue());
+      }
+    } else if (node.isArray()) {
+      for (int i = 0; i < node.size(); i++) {
+        checkNodes(node.get(i));
+      }
+    }
+  }
+
+  /**
+   * Returns a JSON representation of NodePlan.
+   *
+   * @return - JSON String
    * @throws IOException
    */
   public String toJson() throws IOException {
@@ -188,4 +234,21 @@ public String getNodeUUID() {
   public void setNodeUUID(String nodeUUID) {
     this.nodeUUID = nodeUUID;
   }
+
+  private static boolean stepClassIsAllowed(String className) {
+    for (String pkg : SUPPORTED_PACKAGES) {
+      if (className.startsWith(pkg)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private static Collection<String> getAllowedPackages() {
+    return CONFIGURATION.getStringCollection(SUPPORTED_PACKAGES_CONFIG_NAME)
+        .stream()
+        .map(String::trim)
+        .filter(s -> !s.isEmpty())
+        .toList();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/Step.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/Step.java
index 8f696537aff4f..6a52223e3dd21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/Step.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/Step.java
@@ -66,7 +66,7 @@ public interface Step {
   String getSizeString(long size);
 
   /**
-   * Returns maximum number of disk erros tolerated.
+   * Returns maximum number of disk errors tolerated.
    * @return long.
    */
   long getMaxDiskErrors();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 2b889dd2adc5c..7d7e53773ff07 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -6712,4 +6712,12 @@
     Enables observer reads for clients.
     This should only be enabled when clients are using routers.
   </description>
 </property>
+<property>
+  <name>dfs.nodeplan.steps.supported.packages</name>
+  <value>org.apache.hadoop.hdfs.server.diskbalancer.planner</value>
+  <description>
+    Only Step implementations in the named packages will be accepted when deserializing.
+    If you have more than one package, separate the packages using commas.
+  </description>
+</property>
 </configuration>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/TestNodePlan.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/TestNodePlan.java
new file mode 100644
index 0000000000000..bef5a3823cb11
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/TestNodePlan.java
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdfs.server.diskbalancer.planner;
+
+import java.io.IOException;
+
+import org.junit.jupiter.api.Test;
+import sample.SampleStep;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
+import org.apache.hadoop.test.LambdaTestUtils;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class TestNodePlan {
+
+  @Test
+  public void testNodePlan() throws IOException {
+    NodePlan nodePlan = new NodePlan("datanode1234", 1234);
+    MoveStep moveStep = new MoveStep();
+    moveStep.setBandwidth(12345);
+    moveStep.setBytesToMove(98765);
+    moveStep.setIdealStorage(1.234);
+    moveStep.setMaxDiskErrors(4567);
+    moveStep.setVolumeSetID("id1234");
+    nodePlan.addStep(moveStep);
+    String json = nodePlan.toJson();
+    assertThat(NodePlan.parseJson(json)).isNotNull();
+  }
+
+  @Test
+  public void testNodePlanWithDisallowedStep() throws Exception {
+    NodePlan nodePlan = new NodePlan("datanode1234", 1234);
+    Step sampleStep = new SampleStep();
+    sampleStep.setBandwidth(12345);
+    sampleStep.setMaxDiskErrors(4567);
+    nodePlan.addStep(sampleStep);
+    assertNodePlanInvalid(nodePlan);
+  }
+
+  @Test
+  public void testNodePlanWithSecondStepDisallowed() throws Exception {
+    NodePlan nodePlan = new NodePlan("datanode1234", 1234);
+    MoveStep moveStep = new MoveStep();
+    moveStep.setBandwidth(12345);
+    moveStep.setBytesToMove(98765);
+    moveStep.setIdealStorage(1.234);
+    moveStep.setMaxDiskErrors(4567);
+    moveStep.setVolumeSetID("id1234");
+    nodePlan.addStep(moveStep);
+    Step sampleStep = new SampleStep();
+    sampleStep.setBandwidth(12345);
+    sampleStep.setMaxDiskErrors(4567);
+    nodePlan.addStep(sampleStep);
+    assertNodePlanInvalid(nodePlan);
+  }
+
+  @Test
+  public void testNodePlanWithNestedDisallowedStep() throws Exception {
+    NodePlan nodePlan = new NodePlan("datanode1234", 1234);
+    NodePlan nodePlan2 = new NodePlan("datanode9876", 9876);
+    SampleStep sampleStep = new SampleStep();
+    sampleStep.setBandwidth(12345);
+    sampleStep.setMaxDiskErrors(4567);
+    nodePlan2.addStep(sampleStep);
+    NestedStep nestedStep = new NestedStep(nodePlan2);
+    nestedStep.setBandwidth(1234);
+    nestedStep.setMaxDiskErrors(456);
+    nodePlan.addStep(nestedStep);
+    assertNodePlanInvalid(nodePlan);
+  }
+
+  private void assertNodePlanInvalid(final NodePlan nodePlan) throws Exception {
+    LambdaTestUtils.intercept(
+        IOException.class,
+        "Invalid @class value in NodePlan JSON: sample.SampleStep",
+        () -> NodePlan.parseJson(nodePlan.toJson()));
+  }
+
+  private static class NestedStep implements Step {
+    @JsonProperty
+    private NodePlan nodePlan;
+
+    NestedStep() {
+      // needed to make Jackson deserialization easier
+    }
+
+    NestedStep(NodePlan nodePlan) {
+      this.nodePlan = nodePlan;
+    }
+
+    NodePlan getNodePlan() {
+      return nodePlan;
+    }
+
+    @Override
+    public long getBytesToMove() {
+      return 0;
+    }
+
+    @Override
+    public DiskBalancerVolume getDestinationVolume() {
+      return null;
+    }
+
+    @Override
+    public double getIdealStorage() {
+      return 0;
+    }
+
+    @Override
+    public DiskBalancerVolume getSourceVolume() {
+      return null;
+    }
+
+    @Override
+    public String getVolumeSetID() {
+      return "";
+    }
+
+    @Override
+    public String getSizeString(long size) {
+      return "";
+    }
+
+    @Override
+    public long getMaxDiskErrors() {
+      return 0;
+    }
+
+    @Override
+    public long getTolerancePercent() {
+      return 0;
+    }
+
+    @Override
+    public long getBandwidth() {
+      return 0;
+    }
+
+    @Override
+    public void setTolerancePercent(long tolerancePercent) {
+
+    }
+
+    @Override
+    public void setBandwidth(long bandwidth) {
+
+    }
+
+    @Override
+    public void setMaxDiskErrors(long maxDiskErrors) {
+
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/sample/SampleStep.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/sample/SampleStep.java
new file mode 100644
index 0000000000000..4240c5b08b61e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/sample/SampleStep.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package sample;
+
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
+import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
+
+/**
+ * A sample Step implementation used in Serde Tests.
+ */
+public class SampleStep implements Step {
+  private long bytesToMove;
+  private long bandwidth;
+  private long tolerancePercent;
+  private long maxDiskErrors;
+
+  @Override
+  public long getBytesToMove() {
+    return bytesToMove;
+  }
+
+  @Override
+  public DiskBalancerVolume getDestinationVolume() {
+    return null;
+  }
+
+  @Override
+  public double getIdealStorage() {
+    return 0;
+  }
+
+  @Override
+  public DiskBalancerVolume getSourceVolume() {
+    return null;
+  }
+
+  @Override
+  public String getVolumeSetID() {
+    return "";
+  }
+
+  @Override
+  public String getSizeString(long size) {
+    return Long.toString(size);
+  }
+
+  @Override
+  public long getMaxDiskErrors() {
+    return maxDiskErrors;
+  }
+
+  @Override
+  public long getTolerancePercent() {
+    return tolerancePercent;
+  }
+
+  @Override
+  public long getBandwidth() {
+    return bandwidth;
+  }
+
+  @Override
+  public void setTolerancePercent(long tolerancePercent) {
+    this.tolerancePercent = tolerancePercent;
+  }
+
+  @Override
+  public void setBandwidth(long bandwidth) {
+    this.bandwidth = bandwidth;
+  }
+
+  @Override
+  public void setMaxDiskErrors(long maxDiskErrors) {
+    this.maxDiskErrors = maxDiskErrors;
+  }
+}
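
Reviewer note (not part of the patch): a minimal sketch of the behavior the new allowlist enforces. The payload below is illustrative and the class name com.evil.Gadget is hypothetical; the exact field layout does not matter, because checkNodes() walks the entire JSON tree before Jackson binds anything, so any @class value outside the packages configured via dfs.nodeplan.steps.supported.packages is rejected up front.

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;

    public class NodePlanAllowlistDemo {
      public static void main(String[] args) {
        // A plan whose polymorphic type hint points outside the allowlist.
        String payload = "{\"volumeSetPlans\":[{\"@class\":\"com.evil.Gadget\"}]}";
        try {
          NodePlan.parseJson(payload);
        } catch (IOException expected) {
          // Fails fast, before any class loading or instantiation:
          // "Invalid @class value in NodePlan JSON: com.evil.Gadget"
          System.out.println(expected.getMessage());
        }
      }
    }

Deployments that ship their own Step implementations can widen the allowlist with a comma-separated value in hdfs-site.xml, e.g. setting dfs.nodeplan.steps.supported.packages to org.apache.hadoop.hdfs.server.diskbalancer.planner,com.example.mysteps (the second package is hypothetical).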