Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
164 changes: 141 additions & 23 deletions python/tvm/relax/frontend/onnx/onnx_frontend.py
Original file line number Diff line number Diff line change
Expand Up @@ -2632,6 +2632,105 @@ def _impl_v1(cls, bb, inputs, attr, params):
return inputs[0]


def _onnx_resize_spatial_roi_vector(roi_full: relax.Expr, rank: int) -> relax.Expr:
    """Drop the N/C entries from a full ONNX ROI vector.

    ONNX packs the ROI as ``[start_0..start_{rank-1}, end_0..end_{rank-1}]``
    over all axes; TOPI only wants the spatial axes, so slice out positions
    ``[2, rank)`` (spatial starts) and ``[rank + 2, 2 * rank)`` (spatial ends)
    and concatenate them back into one vector.
    """
    spatial_starts = relax.op.strided_slice(roi_full, axes=[0], begin=[2], end=[rank])
    spatial_ends = relax.op.strided_slice(
        roi_full, axes=[0], begin=[rank + 2], end=[2 * rank]
    )
    return relax.op.concat([spatial_starts, spatial_ends], axis=0)


def _topi_resize3d_roi_from_onnx_ncdhw_spatial(roi_spatial: list[float]) -> list[float]:
"""Reorder spatial ROI for NCDHW ONNX layout to TOPI resize3d convention.

ONNX spatial slice after dropping N/C is ordered (D, H, W) for starts then ends.
TOPI ``resize3d`` with layout NCDHW expects
``(start_w, start_h, start_d, end_w, end_h, end_d)`` (see topi/image/resize.py).
"""
if len(roi_spatial) != 6:
return roi_spatial
d0, h0, w0, d1, h1, w1 = roi_spatial
return [w0, h0, d0, w1, h1, d1]


def _emit_resize_topi_dynamic_roi(
    bb: relax.BlockBuilder,
    data: relax.Expr,
    roi_spatial_vec: relax.Expr,
    sizes_spatial: list,
    rank: int,
    topi_mode: str,
    coord_mode: str,
    rounding_method: str,
    cubic_coeff_a: float,
    exclude_outside: int,
    extrapolation_value: float,
) -> relax.Expr:
    """Lower Resize with a runtime (Expr) ROI through TOPI.

    The relax ``image.resize*`` ops only accept static ROI lists, so when the
    ROI is a tensor we go through ``bb.emit_te`` with the TOPI resize compute,
    which accepts an Expr ROI. ``rank`` is the rank of ``data`` (3/4/5 for
    NCW/NCHW/NCDHW); ``sizes_spatial`` holds the static output spatial sizes.
    """
    if rank == 3:

        def _compute_1d(x, roi, out_w):
            return topi.image.resize1d(
                x,
                (roi[0], roi[1]),
                [out_w],
                "NCW",
                topi_mode,
                coord_mode,
                rounding_method,
                cubic_coeff_a,
                exclude_outside,
                extrapolation_value,
            )

        return bb.emit_te(_compute_1d, data, roi_spatial_vec, sizes_spatial[0])

    if rank == 4:
        out_h, out_w = sizes_spatial[0], sizes_spatial[1]

        def _compute_2d(x, roi, h, w):
            return topi.image.resize2d(
                x,
                (roi[0], roi[1], roi[2], roi[3]),
                (h, w),
                layout="NCHW",
                method=topi_mode,
                coordinate_transformation_mode=coord_mode,
                rounding_method=rounding_method,
                bicubic_alpha=cubic_coeff_a,
                bicubic_exclude=exclude_outside,
                extrapolation_value=extrapolation_value,
            )

        return bb.emit_te(_compute_2d, data, roi_spatial_vec, out_h, out_w)

    # rank == 5
    def _compute_3d(x, roi, d, h, w):
        # roi arrives in ONNX (D, H, W) start/end order; TOPI wants (W, H, D).
        return topi.image.resize3d(
            x,
            (roi[2], roi[1], roi[0], roi[5], roi[4], roi[3]),
            (d, h, w),
            layout="NCDHW",
            method=topi_mode,
            coordinate_transformation_mode=coord_mode,
            rounding_method=rounding_method,
            bicubic_alpha=cubic_coeff_a,
            bicubic_exclude=exclude_outside,
            extrapolation_value=extrapolation_value,
        )

    out_d, out_h, out_w = sizes_spatial[0], sizes_spatial[1], sizes_spatial[2]
    return bb.emit_te(_compute_3d, data, roi_spatial_vec, out_d, out_h, out_w)


class Resize(OnnxOpConverter):
"""Converts an onnx Resize node into an equivalent Relax expression."""

Expand All @@ -2654,36 +2753,39 @@ def _impl_v18(cls, bb, inputs, attr, params):

# Unpack inputs.
x = inputs[0]
roi = get_constant(inputs[1], params)
scales = get_constant(inputs[2], params)
sizes = get_constant(inputs[3], params)
roi = get_constant(inputs[1], params) if len(inputs) > 1 and inputs[1] is not None else None
scales = get_constant(inputs[2], params) if len(inputs) > 2 else None
sizes = get_constant(inputs[3], params) if len(inputs) > 3 else None
ndims = len(x.struct_info.shape)
assert ndims in (3, 4, 5), "Only resize1d/resize2d/resize3d are supported."

assert scales is None or sizes is None, (
"Only one of scales and sizes can be provided in Resize."
)

# Define relax implementation.
# ROI can be a static list (for relax.image.resize*) or dynamic tensor (TOPI path).
roi_static: list[float] | None = None
roi_dynamic_vec: relax.Expr | None = None
if roi is not None:
if isinstance(roi, relax.Constant):
roi = roi.data.numpy().tolist()
if len(roi) == 2 * ndims:
roi = roi[2:ndims] + roi[ndims + 2 : 2 * ndims]
elif len(roi) == 0:
roi = [0.0] * (2 * (ndims - 2))
roi_np = roi.data.numpy().tolist()
if len(roi_np) == 2 * ndims:
roi_static = roi_np[2:ndims] + roi_np[ndims + 2 : 2 * ndims]
elif len(roi_np) == 0:
roi_static = [0.0] * (2 * (ndims - 2))
elif len(roi_np) == 2 * (ndims - 2):
# Some exporters already provide spatial-only ROI.
roi_static = roi_np
else:
roi_static = roi_np
else:
roi = relax.op.concat(
[
relax.op.strided_slice(roi, axes=[0], begin=[2], end=[ndims]),
relax.op.strided_slice(roi, axes=[0], begin=[ndims + 2], end=[2 * ndims]),
],
axis=0,
roi_dynamic_vec = bb.normalize(
_onnx_resize_spatial_roi_vector(roi, ndims)
)
# TODO The backend C++ func resize2d does not support dynamic ROI for now.
raise NotImplementedError("Dynamic ROI is not supported in resize for now.")
else:
roi = [0.0] * (2 * (ndims - 2))
roi_static = [0.0] * (2 * (ndims - 2))

use_dynamic_roi = roi_dynamic_vec is not None

# Convert scales to sizes if needed.
if scales is not None:
Expand All @@ -2692,7 +2794,7 @@ def _impl_v18(cls, bb, inputs, attr, params):
elif isinstance(scales, relax.expr.ShapeExpr):
scales = [int(val.value) for val in scales.values]
else:
assert f"Type {type(scales)} for scale is currently unsupported."
raise ValueError(f"Type {type(scales)} for scale is currently unsupported.")
sizes = []

for i, dim in enumerate(x.struct_info.shape):
Expand All @@ -2704,13 +2806,28 @@ def _impl_v18(cls, bb, inputs, attr, params):
elif isinstance(sizes, relax.expr.ShapeExpr):
sizes = [int(val.value) for val in sizes.values][2:]
else:
assert f"Type {type(sizes)} for size is currently unsupported."
raise ValueError(f"Type {type(sizes)} for size is currently unsupported.")

if use_dynamic_roi:
return _emit_resize_topi_dynamic_roi(
bb,
x,
roi_dynamic_vec,
sizes,
ndims,
topi_mode,
coord_mode,
rounding_method,
cubic_coeff_a,
exclude_outside,
extrapolation_value,
)

if ndims == 3:
return bb.emit_te(
topi.image.resize1d,
x,
roi,
roi_static,
sizes,
"NCW",
topi_mode,
Expand All @@ -2724,7 +2841,7 @@ def _impl_v18(cls, bb, inputs, attr, params):
return relax.op.image.resize2d(
x,
size=relax.ShapeExpr(sizes),
roi=roi,
roi=roi_static,
layout="NCHW",
method=relax_mode,
coordinate_transformation_mode=coord_mode,
Expand All @@ -2734,10 +2851,11 @@ def _impl_v18(cls, bb, inputs, attr, params):
extrapolation_value=extrapolation_value,
)
else: # ndims == 5
roi3d = _topi_resize3d_roi_from_onnx_ncdhw_spatial(roi_static)
return relax.op.image.resize3d(
x,
size=relax.ShapeExpr(sizes),
roi=roi,
roi=roi3d,
layout="NCDHW",
method=relax_mode,
coordinate_transformation_mode=coord_mode,
Expand Down
58 changes: 58 additions & 0 deletions tests/python/relax/test_frontend_onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -3268,6 +3268,64 @@ def test_resize(with_roi, roi_list, with_constant):
check_correctness(model)


def test_resize_dynamic_roi_tf_crop_and_resize():
    """ROI is a graph input (not initializer), lowered through TOPI dynamic-ROI path."""
    node = helper.make_node(
        "Resize",
        ["X", "roi", "scales"],
        ["Y"],
        mode="linear",
        coordinate_transformation_mode="tf_crop_and_resize",
    )
    # ROI comes in as a graph input, so the importer cannot fold it to a constant.
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 3, 32, 32])
    roi_info = helper.make_tensor_value_info("roi", TensorProto.FLOAT, [8])
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 3, 64, 64])
    scales_init = helper.make_tensor("scales", TensorProto.FLOAT, [4], [1.0, 1.0, 2.0, 2.0])
    graph = helper.make_graph(
        [node],
        "resize_dynamic_roi",
        inputs=[x_info, roi_info],
        initializer=[scales_init],
        outputs=[y_info],
    )
    model = helper.make_model(graph, producer_name="resize_dynamic_roi")
    check_correctness(model, atol=1e-5)


def test_resize_dynamic_roi_3d_tf_crop_and_resize():
    """5-D NCDHW: ROI is a graph input; covers dynamic-ROI TOPI resize3d path."""
    node = helper.make_node(
        "Resize",
        ["X", "roi", "scales"],
        ["Y"],
        mode="linear",
        coordinate_transformation_mode="tf_crop_and_resize",
    )
    x_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 1, 3, 4, 5])
    roi_info = helper.make_tensor_value_info("roi", TensorProto.FLOAT, [10])
    y_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 1, 6, 8, 10])
    scales_init = helper.make_tensor("scales", TensorProto.FLOAT, [5], [1.0, 1.0, 2.0, 2.0, 2.0])
    graph = helper.make_graph(
        [node],
        "resize_dynamic_roi_3d",
        inputs=[x_info, roi_info],
        initializer=[scales_init],
        outputs=[y_info],
    )
    model = helper.make_model(graph, producer_name="resize_dynamic_roi_3d")
    # Pin the ROI to the full tensor so ORT and TOPI agree under
    # tf_crop_and_resize; a random ROI can trigger extrapolation and
    # cross-runtime numerical drift.
    data = rg.standard_normal((1, 1, 3, 4, 5)).astype(np.float32)
    full_roi = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1], dtype=np.float32)
    check_correctness(model, opset=18, atol=1e-5, inputs={"X": data, "roi": full_roi})


def test_resize_nd_sizes():
cases = [
("resize1d", [1, 1, 4], [1, 1, 7]),
Expand Down
Loading