
Commit 9ac01ec

NeiroYT committed: Changes
1 parent 7643258 commit 9ac01ec

File tree (6 files changed: +109 -5 lines)

  include/graph/graph.hpp
  include/layers/ConvLayer.hpp
  include/layers/FCLayer.hpp
  include/layers_oneDNN/ConvLayer.hpp
  src/graph/graph.cpp
  test/graph/test_graph.cpp

include/graph/graph.hpp

Lines changed: 2 additions & 1 deletion
@@ -75,7 +75,8 @@ class Graph {
   Graph& operator=(Graph&&) noexcept = default;
   ~Graph() = default;
 
-  void clone(Graph& result, Tensor& out, const RuntimeOptions& options) const;
+  void clone(Graph& result, Tensor& out,
+             const RuntimeOptions& options = RuntimeOptions()) const;
 
   void setSplitDistribution(
       std::vector<std::vector<std::pair<int, int>>> split_dist) {
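
Usage note for the hunk above: with the new default argument, Graph::clone can be called without an explicit RuntimeOptions. A minimal sketch of both call forms, assuming a graph already wired up with setInput/makeConnection/setOutput as in the new test below (graph, graph_c, output, and opt are placeholder names):

  Graph graph_c;   // destination of the deep copy
  Tensor output;   // output tensor, as bound via setOutput

  graph.clone(graph_c, output);        // uses RuntimeOptions() by default

  RuntimeOptions opt;                  // or pass options explicitly,
  opt.backend = Backend::kOneDnn;      // e.g. targeting the oneDNN backend
  graph.clone(graph_c, output, opt);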

include/layers/ConvLayer.hpp

Lines changed: 3 additions & 1 deletion
@@ -36,7 +36,9 @@ class ConvolutionalLayer : public Layer {
   ConvolutionalLayer(size_t step, size_t pads, size_t dilations, Tensor& kernel,
                      Tensor& bias = *std::make_shared<Tensor>(),
                      size_t group = 1, bool useLegacyImpl = false)
-      : Layer(kConvolution), kernel_(&kernel), bias_(&bias) {
+      : Layer(kConvolution),
+        kernel_(std::make_shared<Tensor>(kernel)),
+        bias_(std::make_shared<Tensor>(bias)) {
     stride_ = step;
     pads_ = pads;
     group_ = group;

include/layers/FCLayer.hpp

Lines changed: 3 additions & 1 deletion
@@ -17,7 +17,9 @@ class FCLayer : public Layer {
  public:
   FCLayer() : Layer(kFullyConnected), weights_(nullptr), bias_(nullptr) {}
   FCLayer(Tensor& weights, Tensor& bias)
-      : Layer(kFullyConnected), weights_(&weights), bias_(&bias) {}
+      : Layer(kFullyConnected),
+        weights_(std::make_shared<Tensor>(weights)),
+        bias_(std::make_shared<Tensor>(bias)) {}
   FCLayer(std::shared_ptr<Tensor> weights, std::shared_ptr<Tensor> bias)
       : Layer(kFullyConnected), weights_(weights), bias_(bias) {}
   void run(const std::vector<Tensor>& input,
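
Side note on the constructor change above (the same pattern is applied in ConvolutionalLayer and ConvLayerOneDnn): the Tensor& overload now deep-copies its arguments into shared_ptr<Tensor> members instead of storing raw pointers, so the layer no longer aliases the caller's tensors. A minimal sketch with hypothetical values:

  Tensor weights = make_tensor<float>({2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F}, {3, 2});
  Tensor bias = make_tensor<float>({0.0F, 0.0F}, {2});

  FCLayer fc(weights, bias);  // copies both tensors into shared_ptr<Tensor> members

  // The caller's weights/bias can now be modified or destroyed without affecting
  // the layer; previously the layer stored &weights and &bias directly.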

include/layers_oneDNN/ConvLayer.hpp

Lines changed: 2 additions & 2 deletions
@@ -28,8 +28,8 @@ class ConvLayerOneDnn : public Layer {
         stride_(stride),
         pads_(pads),
         dilations_(dilations),
-        kernel_(&kernel),
-        bias_(&bias),
+        kernel_(std::make_shared<Tensor>(kernel)),
+        bias_(std::make_shared<Tensor>(bias)),
         group_(group),
         use_legacy_(use_legacy) {}

src/graph/graph.cpp

Lines changed: 5 additions & 0 deletions
@@ -164,6 +164,11 @@ std::shared_ptr<Layer> layer_based_shared_copy(
           *dynamic_cast<BatchNormalizationLayer*>(layer.get()));
       return std::shared_ptr<Layer>(tmp_layer);
     }
+    case it_lab_ai::kOutput: {
+      OutputLayer* tmp_layer =
+          new OutputLayer(*dynamic_cast<OutputLayer*>(layer.get()));
+      return std::shared_ptr<Layer>(tmp_layer);
+    }
     default: {
       throw std::invalid_argument("No such layer type");
     }
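
Side note on the new kOutput case above: before this commit a graph containing an OutputLayer fell through to the default branch and threw during deep copy; now it is copied via OutputLayer's copy constructor like the other layer types. A minimal sketch, assuming layer_based_shared_copy takes a std::shared_ptr<Layer> (its full parameter list is not shown in this diff):

  std::shared_ptr<Layer> src = std::make_shared<OutputLayer>();
  std::shared_ptr<Layer> copy = layer_based_shared_copy(src);
  // copy is an independent OutputLayer; previously this call threw
  // std::invalid_argument("No such layer type").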

test/graph/test_graph.cpp

Lines changed: 94 additions & 0 deletions
@@ -5,15 +5,109 @@
 #include "graph/graph.hpp"
 #include "graph_transformations/graph_transformations.hpp"
 #include "gtest/gtest.h"
+#include "layers/BatchNormalizationLayer.hpp"
+#include "layers/BinaryOpLayer.hpp"
 #include "layers/ConcatLayer.hpp"
+#include "layers/ConvLayer.hpp"
+#include "layers/DropOutLayer.hpp"
 #include "layers/EWLayer.hpp"
 #include "layers/FCLayer.hpp"
+#include "layers/FlattenLayer.hpp"
 #include "layers/InputLayer.hpp"
+#include "layers/MatmulLayer.hpp"
+#include "layers/OutputLayer.hpp"
+#include "layers/PoolingLayer.hpp"
+#include "layers/ReduceLayer.hpp"
+#include "layers/ReshapeLayer.hpp"
+#include "layers/SoftmaxLayer.hpp"
 #include "layers/SplitLayer.hpp"
+#include "layers/Tensor.hpp"
+#include "layers/TransposeLayer.hpp"
+#include "layers_oneDNN/BinaryOpLayer.hpp"
+#include "layers_oneDNN/ConvLayer.hpp"
+#include "layers_oneDNN/EWLayer.hpp"
+#include "layers_oneDNN/PoolingLayer.hpp"
+#include "layers_oneDNN/ReduceLayer.hpp"
 #include "perf/benchmarking.hpp"
 
 using namespace it_lab_ai;
 
+TEST(graph, test_deep_copy) {
+  Graph graph;
+  Graph graph2;
+  Graph graph_c;
+  Graph graph2_c;
+  Tensor input = make_tensor<float>({1.0F, 2.0F}, {2});
+  Tensor output;
+  auto lay1 = std::make_shared<InputLayer>();
+  Shape sh = {2, 2};
+  auto lay2 = std::make_shared<PoolingLayer>(sh, "average");
+  auto lay3 = std::make_shared<EWLayer>();
+  auto lay3_alt = std::make_shared<EwLayerOneDnn>();
+  auto lay4 = std::make_shared<ConvolutionalLayer>();
+  auto lay4_alt = std::make_shared<ConvLayerOneDnn>();
+  auto lay5 = std::make_shared<FCLayer>();
+  auto lay6 = std::make_shared<FlattenLayer>();
+  auto lay7 = std::make_shared<FlattenLayer>();
+  auto lay8 = std::make_shared<DropOutLayer>();
+  auto lay9 = std::make_shared<SplitLayer>(0, 2);
+  auto lay10 = std::make_shared<BinaryOpLayer>();
+  auto lay10_alt = std::make_shared<BinaryOpLayerOneDnn>();
+  auto lay11 = std::make_shared<TransposeLayer>();
+  auto lay12 = std::make_shared<MatmulLayer>();
+  auto lay13 = std::make_shared<ReshapeLayer>();
+  auto lay14 = std::make_shared<SoftmaxLayer>();
+  auto lay15 = std::make_shared<ReduceLayer>();
+  auto lay15_alt = std::make_shared<ReduceLayerOneDnn>();
+  Tensor scale = make_tensor<float>({1.0f}, {1});
+  Tensor bias = make_tensor<float>({0.0f}, {1});
+  Tensor mean = make_tensor<float>({0.0f}, {1});
+  Tensor var = make_tensor<float>({1.0f}, {1});
+  auto lay16 =
+      std::make_shared<BatchNormalizationLayer>(scale, bias, mean, var);
+  auto lay17 = std::make_shared<OutputLayer>();
+  graph.setInput(lay1, input);
+  graph2.setInput(lay1, input);
+  graph.makeConnection(lay1, lay2);
+  graph2.makeConnection(lay1, lay2);
+  graph.makeConnection(lay1, lay3);
+  graph2.makeConnection(lay1, lay3_alt);
+  graph.makeConnection(lay2, lay4);
+  graph2.makeConnection(lay2, lay4_alt);
+  graph.makeConnection(lay2, lay5);
+  graph2.makeConnection(lay2, lay5);
+  graph.makeConnection(lay3, lay6);
+  graph2.makeConnection(lay3, lay6);
+  graph.makeConnection(lay3, lay7);
+  graph2.makeConnection(lay3, lay7);
+  graph.makeConnection(lay4, lay8);
+  graph2.makeConnection(lay4, lay8);
+  graph.makeConnection(lay4, lay9);
+  graph2.makeConnection(lay4, lay9);
+  graph.makeConnection(lay5, lay10);
+  graph2.makeConnection(lay5, lay10_alt);
+  graph.makeConnection(lay5, lay11);
+  graph2.makeConnection(lay5, lay11);
+  graph.makeConnection(lay6, lay12);
+  graph2.makeConnection(lay6, lay12);
+  graph.makeConnection(lay6, lay13);
+  graph2.makeConnection(lay6, lay13);
+  graph.makeConnection(lay7, lay14);
+  graph2.makeConnection(lay7, lay14);
+  graph.makeConnection(lay7, lay15);
+  graph2.makeConnection(lay7, lay15_alt);
+  graph.makeConnection(lay8, lay16);
+  graph2.makeConnection(lay8, lay16);
+  graph.makeConnection(lay8, lay17);
+  graph2.makeConnection(lay8, lay17);
+  graph.setOutput(lay16, output);
+  graph2.setOutput(lay16, output);
+  RuntimeOptions opt;
+  opt.backend = Backend::kOneDnn;
+  ASSERT_NO_THROW(graph.clone(graph_c, output));
+  ASSERT_NO_THROW(graph.clone(graph2_c, output, opt));
+}
+
 TEST(graph, check_connection) {
   const std::vector<float> vec1 = {2.0F, 1.5F, 0.1F, 1.9F, 0.0F, 5.5F};
   Tensor weights = make_tensor<float>(vec1, {3, 2});
