Skip to content

Commit

Permalink
Add CoreML ML Program Resize (#21370)
Browse files Browse the repository at this point in the history
### Description
<!-- Describe your changes. -->
Add CoreML ML Program Resize
- refactor existing logic to simplify it and to share it between the
NeuralNetwork and MLProgram checks
- add handling for some new attributes
- antialias and axes - these should have been handled when the CoreML EP
max opset was set to 21

### Motivation and Context
<!-- - Why is this change required? What problem does it solve?
- If it fixes an open issue, please link to the issue here. -->
Support priority models
  • Loading branch information
skottmckay authored Jul 19, 2024
1 parent 6ffaaeb commit 34cd2e8
Show file tree
Hide file tree
Showing 13 changed files with 671 additions and 199 deletions.
18 changes: 6 additions & 12 deletions onnxruntime/core/providers/coreml/builders/helper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,8 @@ bool IsNodeSupported(const Node& node, const OpBuilderInputParams& input_params,
}
}

bool IsInputSupported(const Node& node, const NodeArg& input,
const OpBuilderInputParams& input_params, const logging::Logger& logger) {
bool IsInputSupported(const Node& node, const NodeArg& input, const OpBuilderInputParams& input_params,
const logging::Logger& logger, bool allow_empty_input) {
if (!input.Exists()) {
// optional input that is not provided
return true;
Expand Down Expand Up @@ -84,16 +84,10 @@ bool IsInputSupported(const Node& node, const NodeArg& input,
return false;
}

if (dim == 0) {
if (node.OpType() == "Resize" && &input == node.InputDefs()[1]) {
// one special case. Resize 'roi' input was originally a required input but is rarely used.
// ROI is not supported in the CoreML implementation so we will ignore the value, but is often added
// (at least in the unit tests) as an initializer with shape {0}.
} else {
LOGS(logger, WARNING) << "CoreML does not support shapes with dimension values of 0. Input:" << input_name
<< ", shape: " << Shape2String(shape);
return false;
}
if (dim == 0 && !allow_empty_input) {
LOGS(logger, WARNING) << "CoreML does not support shapes with dimension values of 0. Input:" << input_name
<< ", shape: " << Shape2String(shape);
return false;
}
}

Expand Down
3 changes: 2 additions & 1 deletion onnxruntime/core/providers/coreml/builders/helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,8 @@ OpBuilderInputParams MakeOpBuilderParams(const GraphViewer& graph_viewer,
const IOpBuilder* GetOpBuilder(const Node& node);

bool IsInputSupported(const Node& node, const NodeArg& node_arg, const OpBuilderInputParams& input_params,
const logging::Logger& logger);
const logging::Logger& logger,
bool allow_empty_input = false);

bool IsNodeSupported(const Node& node, const OpBuilderInputParams& input_params, const logging::Logger& logger);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ bool BaseOpBuilder::IsOpSupported(const Node& node, const OpBuilderInputParams&
bool BaseOpBuilder::HasSupportedInputs(const Node& node, const OpBuilderInputParams& input_params,
const logging::Logger& logger) const {
for (const auto* input : node.InputDefs()) {
if (!IsInputSupported(node, *input, input_params, logger)) {
if (!IsInputSupported(node, *input, input_params, logger, allow_empty_tensor_as_input_)) {
return false;
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,10 @@ class BaseOpBuilder : public IOpBuilder {
void AddInitializersToSkip(ModelBuilder& /*model_builder*/, const Node& /*node*/) const override {}

protected:
explicit BaseOpBuilder(bool allow_empty_tensor_as_input = false)
: allow_empty_tensor_as_input_(allow_empty_tensor_as_input) {
}

// currently we only support float
static bool IsInputFloat(const Node& node, size_t idx, const OpBuilderInputParams& input_params,
const logging::Logger& logger);
Expand All @@ -50,6 +54,8 @@ class BaseOpBuilder : public IOpBuilder {

virtual Status AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
const logging::Logger& logger) const = 0;

const bool allow_empty_tensor_as_input_; // some operators can handle ignoring an empty tensor as input
};

} // namespace coreml
Expand Down
607 changes: 455 additions & 152 deletions onnxruntime/core/providers/coreml/builders/impl/resize_op_builder.cc

Large diffs are not rendered by default.

13 changes: 11 additions & 2 deletions onnxruntime/core/providers/coreml/builders/model_builder.h
Original file line number Diff line number Diff line change
Expand Up @@ -141,8 +141,17 @@ class ModelBuilder {
// so we don't do a copy of the original initializer into the model.
void AddInitializerToSkip(const std::string& tensor_name);

// There are some input which will not be used, add it to a list which will not
// be added to CoreML model, since CoreML does not like input unused
/// <summary>
/// Skip a non-initializer value, that is not used in the CoreML model, but was an input to a supported node.
///
/// This is for a rare edge case where a value is an input to a node but is empty/unused, as the
/// CoreML model requires all model inputs to be consumed.
/// </summary>
/// <remarks>
/// The only known use case for this currently is Resize, and that is largely due to how the unit tests are
/// setup rather than something you'd expect to see in a real model.
/// See ResizeOpBuilder::AddInitializersToSkip for more details.
/// </remarks>
void AddInputToSkip(const std::string& input_name);

const std::string& GetUniqueName(const std::string& base_name);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ CoreMLExecutionProvider::CoreMLExecutionProvider(uint32_t coreml_flags)
: IExecutionProvider{onnxruntime::kCoreMLExecutionProvider},
coreml_flags_(coreml_flags),
coreml_version_(coreml::util::CoreMLVersion()) {
LOGS_DEFAULT(VERBOSE) << "CoreML version: " << coreml_version_;
if (coreml_version_ < MINIMUM_COREML_VERSION) {
LOGS_DEFAULT(ERROR) << "CoreML EP is not supported on this platform.";
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
#include "core/graph/graph_viewer.h"
#include "core/optimizer/initializer.h"
#include "core/providers/common.h"
#include "core/providers/utils.h"
#include "core/providers/shared/utils/utils.h"
#include "core/providers/nnapi/nnapi_builtin/builders/helper.h"
#include "core/providers/nnapi/nnapi_builtin/builders/model_builder.h"
Expand Down Expand Up @@ -251,14 +252,34 @@ bool ResizeOpBuilder::IsOpSupportedImpl(const GraphViewer& graph_viewer, const N
const Initializer unpacked_tensor(*scales);
auto scales_data = unpacked_tensor.DataAsSpan<float>();
input_is_nchw = scales_data[1] == 1.0F;
float const scale_n = scales_data[0];
float const scale_c = input_is_nchw ? scales_data[1] : scales_data[3];
const float scale_n = scales_data[0];
const float scale_c = input_is_nchw ? scales_data[1] : scales_data[3];
const float scale_h = input_is_nchw ? scales_data[2] : scales_data[1];
const float scale_w = input_is_nchw ? scales_data[3] : scales_data[2];

if (scale_n != 1.0f || scale_c != 1.0f) {
LOGS_DEFAULT(VERBOSE) << "Scales of N/C channel should be 1"
<< "Resize of N/C channels are not supported"
<< ", scale_n, " << scale_n << ", scale_c, " << scale_c;
return false;
}

// if downsampling the input size must be evenly divisible by the output size to match the onnx output
if (scale_h < 1.0f || scale_w < 1.0f) {
// we also require input_shape to be known to check
auto h_in = input_is_nchw ? input_shape[2] : input_shape[1];
auto w_in = input_is_nchw ? input_shape[3] : input_shape[2];
if (h_in == 0 || w_in == 0) {
LOGS_DEFAULT(VERBOSE) << "Input H and W must be known to downsample with scales";
return false;
}

if (!utils::IsScalingByAFactorOfN(h_in, scale_h) ||
!utils::IsScalingByAFactorOfN(w_in, scale_w)) {
LOGS_DEFAULT(VERBOSE) << "Input size must be evenly divisible by output size when downsampling";
return false;
}
}
} else {
const auto* sizes = graph_viewer.GetConstantInitializer(inputs[3].node_arg.Name());
if (!sizes) {
Expand Down
16 changes: 16 additions & 0 deletions onnxruntime/core/providers/utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,5 +23,21 @@ common::Status OutputOptionalWithoutDataHelper(const ONNX_NAMESPACE::TypeProto&
return Status::OK();
}
#endif

// Returns true iff 'scale' is the reciprocal of an integer that evenly divides 'n'.
// e.g. scale 0.5 -> reciprocal 2 -> true for any even n.
bool IsScalingByAFactorOfN(int64_t n, float scale) {
  // Only a downscale (0 < scale < 1) can correspond to an integer factor.
  if (scale <= 0.f || scale >= 1.f) {
    return false;
  }

  const double reciprocal = 1.0 / scale;
  const double nearest_int = std::round(reciprocal);
  constexpr double epsilon = 1.0e-4;  // arbitrarily small enough

  // Reject reciprocals that are not (within tolerance of) a whole number.
  if (std::abs(reciprocal - nearest_int) >= epsilon) {
    return false;
  }

  // Integer reciprocal: check whether it is a factor of n.
  return n % static_cast<int64_t>(nearest_int) == 0;
}
} // namespace utils
} // namespace onnxruntime
5 changes: 5 additions & 0 deletions onnxruntime/core/providers/utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,5 +15,10 @@ common::Status OutputOptionalWithoutDataHelper(const ONNX_NAMESPACE::TypeProto&
OpKernelContext* context, int output_index);
#endif

/// <summary>
/// Check if the reciprocal of 'scale' is a factor of 'n'.
/// e.g. a scale of 0.5 is 1/2, the reciprocal is 2, and 2 is a factor of any even number.
/// </summary>
bool IsScalingByAFactorOfN(int64_t n, float scale);
} // namespace utils
} // namespace onnxruntime
21 changes: 20 additions & 1 deletion onnxruntime/core/providers/xnnpack/tensor/resize.cc
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#include "core/framework/op_kernel.h"
#include "core/optimizer/initializer.h"
#include "core/providers/xnnpack/xnnpack_init.h"
#include "core/providers/utils.h"

namespace onnxruntime {
namespace xnnpack {
Expand Down Expand Up @@ -68,9 +69,27 @@ bool Resize::IsOnnxNodeSupported(const NodeUnit& node_unit,
InlinedVector<float> scale(4, 1.0F);
if (scale_tensor) {
const Initializer scale_val(*scale_tensor, node_unit.ModelPath());
if (scale_val.DataAsSpan<float>()[1] != 1.0F) {
const auto scales = scale_val.DataAsSpan<float>();
if (scales[1] != 1.0F) {
break;
}

// downsampling output seems to require the output size to be a factor of the input to match ONNX
if (scales[2] < 1.0f || scales[3] < 1.0f) {
// we also require input_shape to be known to check
int64_t h_in = x_shape->dim(2).dim_value();
int64_t w_in = x_shape->dim(3).dim_value();
if (h_in < 0 || w_in < 0) {
break;
}

float scale_h = scales[2];
float scale_w = scales[3];
if (!utils::IsScalingByAFactorOfN(h_in, scale_h) ||
!utils::IsScalingByAFactorOfN(w_in, scale_w)) {
break;
}
}
}

if (size_tensor) {
Expand Down
Loading

0 comments on commit 34cd2e8

Please sign in to comment.