Skip to content

Commit

Permalink
Update CI dependencies and fix tests; generate opset 20 (#1335)
Browse files Browse the repository at this point in the history
1. Update ONNX, ORT version in CI
2. Pin IR version in run_converter_test to 9 because ONNX runtime does
not support IRv10 yet. Need a better mechanism to handle the IR version
change in tests.
3. Generate opset 20 for onnxscript
4. Skip opset 21 tests
5. Fixes #1318

---------

Co-authored-by: Justin Chu <[email protected]>
Co-authored-by: Justin Chu <[email protected]>
  • Loading branch information
3 people authored Apr 2, 2024
1 parent d96dcf2 commit e87e05d
Show file tree
Hide file tree
Showing 28 changed files with 1,173 additions and 298 deletions.
4 changes: 2 additions & 2 deletions noxfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
"jinja2",
"numpy==1.24.4",
"typing_extensions",
"beartype!=0.16.0",
"beartype==0.17.2",
"types-PyYAML",
"expecttest==0.1.6",
"hypothesis",
Expand All @@ -26,7 +26,7 @@
"pytest!=7.1.0",
"pyyaml",
)
ONNX = "onnx==1.14.1"
ONNX = "onnx==1.15"
ONNX_RUNTIME = "onnxruntime==1.16.1"
PYTORCH = "torch==2.1.0"
TORCHVISON = "torchvision==0.16"
Expand Down
64 changes: 7 additions & 57 deletions onnxscript/_internal/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,17 +5,14 @@
from __future__ import annotations

import numbers
from typing import Any, Iterable, Optional, Sequence
from typing import Optional, Sequence

import numpy as np
import onnx
import onnx.helper
from onnx import FunctionProto, ModelProto, TensorProto, ValueInfoProto

from onnxscript import tensor

# pylint: enable=unused-import, ungrouped-imports


def external_tensor(
name: str,
Expand All @@ -26,7 +23,7 @@ def external_tensor(
length: Optional[int] = None,
checksum: Optional[str] = None,
basepath: Optional[str] = None,
) -> TensorProto:
) -> onnx.TensorProto:
"""Create a TensorProto referencing externally stored tensor-data.
Args:
Expand All @@ -44,11 +41,11 @@ def external_tensor(
See https://github.com/onnx/onnx/blob/main/docs/ExternalData.md for more details.
"""
tensor_proto = TensorProto()
tensor_proto = onnx.TensorProto()
tensor_proto.name = name
tensor_proto.data_type = data_type
tensor_proto.dims.extend(dims)
tensor_proto.data_location = TensorProto.EXTERNAL
tensor_proto.data_location = onnx.TensorProto.EXTERNAL

def add(k, v):
entry = tensor_proto.external_data.add()
Expand All @@ -74,17 +71,17 @@ def value_to_type_proto(val):
shape = val.shape
return onnx.helper.make_tensor_type_proto(elem_type, shape)
if isinstance(val, int):
return onnx.helper.make_tensor_type_proto(TensorProto.INT32, [])
return onnx.helper.make_tensor_type_proto(onnx.TensorProto.INT32, [])
if isinstance(val, (float, np.float32)):
return onnx.helper.make_tensor_type_proto(TensorProto.FLOAT, [])
return onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, [])
if isinstance(val, list):
if len(val) > 0:
return onnx.helper.make_sequence_type_proto(value_to_type_proto(val[0]))
# Edge-case. Cannot determine a suitable ONNX type for an empty list.
# Should be using a typed-value instead.
# Treated as a sequence of tensors of float-type.
return onnx.helper.make_sequence_type_proto(
onnx.helper.make_tensor_type_proto(TensorProto.FLOAT, None)
onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, None)
)
if isinstance(val, numbers.Number):
nparray = np.array(val)
Expand All @@ -102,50 +99,3 @@ def values_to_value_infos(name_values):
for (name, val) in name_values
if val is not None
]


def make_model_from_function_proto(
    function_proto: FunctionProto,
    function_opset_version: int,
    input_value_infos: Sequence[ValueInfoProto],
    output_value_infos: Sequence[ValueInfoProto],
    **attrs: Any,
) -> ModelProto:
    """Wrap *function_proto* in a minimal model that calls it exactly once.

    Builds a graph containing a single node invoking the function, using the
    given value_infos as the graph's inputs and outputs.

    Args:
        function_proto: The FunctionProto representing the single call.
        function_opset_version: Opset version to record for the function's domain.
        input_value_infos: ValueInfoProtos for the function's inputs.
        output_value_infos: ValueInfoProtos for the function's outputs.
        **attrs: Attributes to set on the call node.

    Returns:
        A ModelProto whose graph consists of the one call node, with the
        function registered under ``functions``.
    """
    # One node that invokes the function by name within its domain; the node's
    # input/output names are taken from the provided value_infos.
    call_node = onnx.helper.make_node(
        function_proto.name,
        [info.name for info in input_value_infos],
        [info.name for info in output_value_infos],
        domain=function_proto.domain,
        **attrs,
    )
    wrapper_graph = onnx.helper.make_graph(
        [call_node], "node_graph", input_value_infos, output_value_infos
    )
    # Start from the function's own opset imports; append an entry for the
    # function's domain only if one is not already present.
    opset_imports: Iterable[onnx.OperatorSetIdProto] = function_proto.opset_import
    if not any(entry.domain == function_proto.domain for entry in opset_imports):
        opset_imports = [
            *opset_imports,
            onnx.helper.make_opsetid(function_proto.domain, function_opset_version),
        ]
    return onnx.helper.make_model(
        wrapper_graph,
        functions=[function_proto],
        producer_name="onnxscript",
        opset_imports=opset_imports,
    )
33 changes: 2 additions & 31 deletions onnxscript/backend/onnx_export_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@ def skip(pattern: str | Pattern, reason: str, *, condition: bool = True):
r"^test_range_int32_type_negative_delta_expanded",
"Change when the converter supports support something like 'while i < n and cond:'",
),
skip(r"^test_ai_onnx_ml_label_encoder", "ONNX Runtime does not support Opset 21 at 1.17"),
)


Expand Down Expand Up @@ -247,37 +248,7 @@ def test_export2python_produces_correct_onnx_script_model(
functions = extract_functions(backend_test.name, code, self.test_folder)
main_function = functions[f"bck_{backend_test.name}"]
self.assertIsNotNone(main_function)
proto = main_function.to_model_proto()

# Opset may be different when an binary operator is used.
if backend_test.onnx_model.ir_version != proto.ir_version:
if (
not backend_test.name.startswith( # pylint: disable=too-many-boolean-expressions
"test_add"
)
and not backend_test.name.startswith("test_and")
and not backend_test.name.startswith("test_div")
and not backend_test.name.startswith("test_equal")
and not backend_test.name.startswith("test_greater")
and not backend_test.name.startswith("test_less")
and not backend_test.name.startswith("test_matmul")
and not backend_test.name.startswith("test_mod")
and not backend_test.name.startswith("test_mul")
and not backend_test.name.startswith("test_not")
and not backend_test.name.startswith("test_or")
and not backend_test.name.startswith("test_pow")
and not backend_test.name.startswith("test_sub")
and (backend_test.onnx_model.ir_version, proto.ir_version)
not in {(3, 4), (5, 6)}
):
# Unexpected behavior for old opsets
raise AssertionError(
f"Incompatible ir_version {(backend_test.onnx_model.ir_version)} !="
f" {(proto.ir_version)}\n"
f"{backend_test.onnx_model}\n"
f"-----\n"
f"{proto}"
)
proto = main_function.to_model_proto(ir_version=backend_test.onnx_model.ir_version)

try:
session = ort.InferenceSession(
Expand Down
8 changes: 8 additions & 0 deletions onnxscript/function_libs/torch_lib/graph_building_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
# mypy: disable-error-code="arg-type,type-arg,valid-type"
from __future__ import annotations

import os
import unittest

import torch
Expand All @@ -11,8 +12,11 @@
import onnxscript.testing
from onnxscript import FLOAT, evaluator
from onnxscript import opset18 as op
from onnxscript._internal import version_utils
from onnxscript.function_libs.torch_lib import graph_building, ops

IS_WINDOWS = os.name == "nt"


class TestTorchScriptTracingEvaluator(unittest.TestCase):
def setUp(self):
Expand Down Expand Up @@ -138,6 +142,10 @@ def test_add_initializer_allows_adding_the_same_tensor_twice_using_same_name(sel
graph.add_initializer("x", x_tensor)


@unittest.skipIf(
IS_WINDOWS and version_utils.torch_older_than("2.3"),
"dynamo_export not supported on Windows in PyTorch<2.3",
)
class TestModelSaving(unittest.TestCase):
def test_save_initializer_to_files_for_large_model(self):
class MLP(torch.nn.Module):
Expand Down
2 changes: 1 addition & 1 deletion onnxscript/irbuilder.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ def _format(seq: Sequence[Any], prefix: str, sep: str, suffix: str, formatter=st
return prefix + sep.join([formatter(x) for x in seq]) + suffix


def select_ir_version(version: int, domain: str = ""):
def select_ir_version(version: int, domain: str = "") -> int:
"""Selects a suitable ONNX ir_version for a given opset version."""
if domain == "":
domain = "ai.onnx"
Expand Down
14 changes: 14 additions & 0 deletions onnxscript/onnx_opset/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,11 @@
from onnxscript.onnx_opset._impl.opset17 import Opset17
from onnxscript.onnx_opset._impl.opset18 import Opset18
from onnxscript.onnx_opset._impl.opset19 import Opset19
from onnxscript.onnx_opset._impl.opset20 import Opset20
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml1 import Opset_ai_onnx_ml1
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml2 import Opset_ai_onnx_ml2
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml3 import Opset_ai_onnx_ml3
from onnxscript.onnx_opset._impl.opset_ai_onnx_ml4 import Opset_ai_onnx_ml4
from onnxscript.onnx_opset._impl.opset_ai_onnx_preview_training1 import (
Opset_ai_onnx_preview_training1,
)
Expand All @@ -65,9 +67,11 @@
"opset17",
"opset18",
"opset19",
"opset20",
"opset_ai_onnx_ml1",
"opset_ai_onnx_ml2",
"opset_ai_onnx_ml3",
"opset_ai_onnx_ml4",
"opset_ai_onnx_preview_training1",
]

Expand Down Expand Up @@ -97,9 +101,11 @@
opset17 = Opset17()
opset18 = Opset18()
opset19 = Opset19()
opset20 = Opset20()
opset_ai_onnx_ml1 = Opset_ai_onnx_ml1()
opset_ai_onnx_ml2 = Opset_ai_onnx_ml2()
opset_ai_onnx_ml3 = Opset_ai_onnx_ml3()
opset_ai_onnx_ml4 = Opset_ai_onnx_ml4()
opset_ai_onnx_preview_training1 = Opset_ai_onnx_preview_training1()
all_opsets: Mapping[Tuple[str, int], Opset] = {
(
Expand Down Expand Up @@ -178,6 +184,10 @@
"",
19,
): opset19,
(
"",
20,
): opset20,
(
"ai.onnx.ml",
1,
Expand All @@ -190,6 +200,10 @@
"ai.onnx.ml",
3,
): opset_ai_onnx_ml3,
(
"ai.onnx.ml",
4,
): opset_ai_onnx_ml4,
(
"ai.onnx.preview.training",
1,
Expand Down
Loading

0 comments on commit e87e05d

Please sign in to comment.